diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json index 4afd3d544d070..e725dcde236de 100644 --- a/.github/jsonnetfile.json +++ b/.github/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "workflows" } }, - "version": "5343bc71d96dc4247021a66c3da8fd5cd4c957dd" + "version": "965213a0fe2632438ab0524d606cb71d414e2388" } ], "legacyImports": true diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json index d1c33af16bb49..cf74e548f5227 100644 --- a/.github/jsonnetfile.lock.json +++ b/.github/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "workflows" } }, - "version": "5343bc71d96dc4247021a66c3da8fd5cd4c957dd", - "sum": "/+ozeV2rndtz8N3cZmrWxbNJFI7fkwoDzhECMHG1RoA=" + "version": "965213a0fe2632438ab0524d606cb71d414e2388", + "sum": "DXmqwVyytIhA0tHlMQUCLD8buVjjCb04YcIxJ3BLFqM=" } ], "legacyImports": false diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 494ced9f54329..d29c5cc13ca7d 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -36,6 +36,12 @@ "matchManagers": ["kustomize"], "enabled": false }, + { + // Disable certain npm updates for compatibility reasons + "matchManagers": ["npm"], + "matchPackageNames": ["tailwindcss"], + "enabled": false + }, { // Don't automatically merge GitHub Actions updates "matchManagers": ["github-actions"], diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet index 47f357a25ff11..519182d1d91b8 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet @@ -148,15 +148,15 @@ local runner = import 'runner.libsonnet', dockerfile='Dockerfile', context='release', platform=[ - 'linux/amd64', - 'linux/arm64', + r.forPlatform('linux/amd64'), + r.forPlatform('linux/arm64'), ] ) - job.new() + job.new('${{ matrix.runs_on }}') + job.withStrategy({ 'fail-fast': true, matrix: { - platform: platform, + include: platform, }, }) + job.withSteps([ @@ -174,9 +174,9 @@ local runner = import 'runner.libsonnet', mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -190,7 +190,7 @@ local runner = import 'runner.libsonnet', + step.with({ context: context, file: 'release/%s/%s' % [path, dockerfile], - platforms: '${{ matrix.platform }}', + platforms: '${{ matrix.arch }}', push: false, tags: '${{ env.IMAGE_PREFIX }}/%s:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}' % [name], outputs: 'type=local,dest=release/plugins/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}' % name, diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet index d35c8a76661e8..8441bad30930f 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet @@ -182,6 +182,7 @@ local pullRequestFooter = 'Merging this PR will release the 
[artifacts](https:// + step.with({ imageDir: 'images', imagePrefix: '${{ env.IMAGE_PREFIX }}', + isLatest: '${{ needs.createRelease.outputs.isLatest }}', }), ] ), @@ -219,6 +220,7 @@ local pullRequestFooter = 'Merging this PR will release the [artifacts](https:// imagePrefix: '${{ env.IMAGE_PREFIX }}', isPlugin: true, buildDir: 'release/%s' % path, + isLatest: '${{ needs.createRelease.outputs.isLatest }}', }), ] ), diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet index 930aa539f25c6..3329a6374368b 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet @@ -10,13 +10,13 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; lokiRelease.releasePRWorkflow( imageJobs={ loki: build.image('fake-loki', 'cmd/loki'), - 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=['linux/amd64', 'linux/arm64']), + 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage), }, buildImage=buildImage, buildArtifactsBucket='loki-build-artifacts', branches=['release-[0-9]+.[0-9]+.x'], imagePrefix='trevorwhitney075', - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', skipValidation=false, versioningStrategy='always-bump-patch', @@ -28,14 +28,14 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; lokiRelease.releasePRWorkflow( imageJobs={ loki: build.image('fake-loki', 'cmd/loki'), - 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=['linux/amd64', 'linux/arm64']), + 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage), }, buildImage=buildImage, buildArtifactsBucket='loki-build-artifacts', branches=['release-[0-9]+.[0-9]+.x'], dryRun=true, imagePrefix='trevorwhitney075', - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', skipValidation=false, versioningStrategy='always-bump-patch', @@ -54,7 +54,7 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; getDockerCredsFromVault=false, imagePrefix='trevorwhitney075', pluginBuildDir=dockerPluginDir, - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', useGitHubAppToken=true, ) + { diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 111cc1da3bdff..7ce894025d039 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -618,7 +618,7 @@ jobs: loki-docker-driver: needs: - "version" - runs-on: "ubuntu-latest" + runs-on: "${{ matrix.runs_on }}" steps: - name: "pull release library code" uses: "actions/checkout@v4" @@ -649,9 +649,9 @@ jobs: mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -670,7 +670,7 @@ jobs: context: "release" file: 
"release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" - platforms: "${{ matrix.platform }}" + platforms: "${{ matrix.arch }}" push: false tags: "${{ env.IMAGE_PREFIX }}/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}" - if: "${{ fromJSON(needs.version.outputs.pr_created) }}" @@ -689,7 +689,7 @@ jobs: strategy: fail-fast: true matrix: - platform: + include: - arch: "linux/amd64" runs_on: - "github-hosted-ubuntu-x64-small" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 751ad5dc51538..8f2d1f1e302ad 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -618,7 +618,7 @@ jobs: loki-docker-driver: needs: - "version" - runs-on: "ubuntu-latest" + runs-on: "${{ matrix.runs_on }}" steps: - name: "pull release library code" uses: "actions/checkout@v4" @@ -649,9 +649,9 @@ jobs: mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -670,7 +670,7 @@ jobs: context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" - platforms: "${{ matrix.platform }}" + platforms: "${{ matrix.arch }}" push: false tags: "${{ env.IMAGE_PREFIX }}/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}" - if: "${{ fromJSON(needs.version.outputs.pr_created) }}" @@ -689,7 +689,7 @@ jobs: strategy: fail-fast: true matrix: - platform: + include: - arch: "linux/amd64" runs_on: - "github-hosted-ubuntu-x64-small" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 426f523f80da7..67eefdbe76009 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -151,6 +151,7 @@ jobs: buildDir: "release/clients/cmd/docker-driver" imageDir: "plugins" imagePrefix: "${{ env.IMAGE_PREFIX }}" + isLatest: "${{ needs.createRelease.outputs.isLatest }}" isPlugin: true publishImages: needs: @@ -186,6 +187,7 @@ jobs: with: imageDir: "images" imagePrefix: "${{ env.IMAGE_PREFIX }}" + isLatest: "${{ needs.createRelease.outputs.isLatest }}" publishRelease: needs: - "createRelease" diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md index 0cd4d4eccc431..2d768f22f3667 100644 --- a/docs/sources/send-data/promtail/configuration.md +++ b/docs/sources/send-data/promtail/configuration.md @@ -836,7 +836,9 @@ replace: The `journal` block configures reading from the systemd journal from Promtail. Requires a build of Promtail that has journal support _enabled_. If -using the AMD64 Docker image, this is enabled by default. +using the AMD64 Docker image, this is enabled by default. On some systems a +permission is needed for the user promtail to access journal logs. 
+For Ubuntu (24.04) you need to add `promtail` to the group `systemd-journal` with `sudo usermod -a -G systemd-journal promtail`. ```yaml # When true, log messages from the journal are passed through the diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 503809c69c42b..035c550db7eeb 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -130,6 +130,25 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set # The ruler block configures the Loki ruler. [ruler: ] +ruler_storage: + # The thanos_object_store_config block configures the connection to object + # storage backend using thanos-io/objstore clients. This will become the + # default way of configuring object store clients in future releases. + # Currently this is opt-in and takes effect only when `-use-thanos-objstore` + # is set to true. + # The CLI flags prefix for this block configuration is: ruler-storage + [] + + # Backend storage to use. Supported backends are: local, s3, gcs, azure, + # swift, filesystem, alibabacloud, bos + # CLI flag: -ruler-storage.backend + [backend: | default = "filesystem"] + + local: + # Directory to scan for rules + # CLI flag: -ruler-storage.local.directory + [directory: | default = ""] + # The ingester_client block configures how the distributor will connect to # ingesters. Only appropriate when running all components, the distributor, or # the querier. @@ -659,138 +678,10 @@ pattern_ingester: [compactor: ] compactor_grpc_client: - # gRPC client max receive message size (bytes). - # CLI flag: -compactor.grpc-client.grpc-max-recv-msg-size - [max_recv_msg_size: | default = 104857600] - - # gRPC client max send message size (bytes). - # CLI flag: -compactor.grpc-client.grpc-max-send-msg-size - [max_send_msg_size: | default = 104857600] - - # Use compression when sending messages. Supported values are: 'gzip', - # 'snappy' and '' (disable compression) - # CLI flag: -compactor.grpc-client.grpc-compression - [grpc_compression: | default = ""] - - # Rate limit for gRPC client; 0 means disabled. - # CLI flag: -compactor.grpc-client.grpc-client-rate-limit - [rate_limit: | default = 0] - - # Rate limit burst for gRPC client. - # CLI flag: -compactor.grpc-client.grpc-client-rate-limit-burst - [rate_limit_burst: | default = 0] - - # Enable backoff and retry when we hit rate limits. - # CLI flag: -compactor.grpc-client.backoff-on-ratelimits - [backoff_on_ratelimits: | default = false] - - backoff_config: - # Minimum delay when backing off. - # CLI flag: -compactor.grpc-client.backoff-min-period - [min_period: | default = 100ms] - - # Maximum delay when backing off. - # CLI flag: -compactor.grpc-client.backoff-max-period - [max_period: | default = 10s] - - # Number of times to backoff and retry before failing. - # CLI flag: -compactor.grpc-client.backoff-retries - [max_retries: | default = 10] - - # Initial stream window size. Values less than the default are not supported - # and are ignored. Setting this to a value other than the default disables the - # BDP estimator. - # CLI flag: -compactor.grpc-client.initial-stream-window-size - [initial_stream_window_size: | default = 63KiB1023B] - - # Initial connection window size. Values less than the default are not - # supported and are ignored. Setting this to a value other than the default - # disables the BDP estimator. 
- # CLI flag: -compactor.grpc-client.initial-connection-window-size - [initial_connection_window_size: | default = 63KiB1023B] - - # Enable TLS in the gRPC client. This flag needs to be enabled when any other - # TLS flag is set. If set to false, insecure connection to gRPC server will be - # used. - # CLI flag: -compactor.grpc-client.tls-enabled - [tls_enabled: | default = false] - - # Path to the client certificate, which will be used for authenticating with - # the server. Also requires the key path to be configured. - # CLI flag: -compactor.grpc-client.tls-cert-path - [tls_cert_path: | default = ""] - - # Path to the key for the client certificate. Also requires the client - # certificate to be configured. - # CLI flag: -compactor.grpc-client.tls-key-path - [tls_key_path: | default = ""] - - # Path to the CA certificates to validate server certificate against. If not - # set, the host's root CA certificates are used. - # CLI flag: -compactor.grpc-client.tls-ca-path - [tls_ca_path: | default = ""] - - # Override the expected name on the server certificate. - # CLI flag: -compactor.grpc-client.tls-server-name - [tls_server_name: | default = ""] - - # Skip validating server certificate. - # CLI flag: -compactor.grpc-client.tls-insecure-skip-verify - [tls_insecure_skip_verify: | default = false] - - # Override the default cipher suite list (separated by commas). Allowed - # values: - # - # Secure Ciphers: - # - TLS_AES_128_GCM_SHA256 - # - TLS_AES_256_GCM_SHA384 - # - TLS_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - # - # Insecure Ciphers: - # - TLS_RSA_WITH_RC4_128_SHA - # - TLS_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA - # - TLS_RSA_WITH_AES_256_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA256 - # - TLS_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - # CLI flag: -compactor.grpc-client.tls-cipher-suites - [tls_cipher_suites: | default = ""] - - # Override the default minimum TLS version. Allowed values: VersionTLS10, - # VersionTLS11, VersionTLS12, VersionTLS13 - # CLI flag: -compactor.grpc-client.tls-min-version - [tls_min_version: | default = ""] - - # The maximum amount of time to establish a connection. A value of 0 means - # default gRPC client connect timeout and backoff. - # CLI flag: -compactor.grpc-client.connect-timeout - [connect_timeout: | default = 5s] - - # Initial backoff delay after first connection failure. Only relevant if - # ConnectTimeout > 0. - # CLI flag: -compactor.grpc-client.connect-backoff-base-delay - [connect_backoff_base_delay: | default = 1s] - - # Maximum backoff delay when establishing a connection. Only relevant if - # ConnectTimeout > 0. - # CLI flag: -compactor.grpc-client.connect-backoff-max-delay - [connect_backoff_max_delay: | default = 5s] + # The grpc_client block configures the gRPC client used to communicate between + # a client and server component in Loki. 
+ # The CLI flags prefix for this block configuration is: compactor.grpc-client + [] # The limits_config block configures global and per-tenant limits in Loki. The # values here can be overridden in the `overrides` section of the runtime_config @@ -895,6 +786,32 @@ kafka_config: # CLI flag: -kafka.max-consumer-lag-at-startup [max_consumer_lag_at_startup: | default = 15s] +dataobj_consumer: + builderconfig: + # The size of the SHA prefix to use for the data object builder. + # CLI flag: -dataobj-consumer.sha-prefix-size + [sha_prefix_size: | default = 2] + + # The size of the target page to use for the data object builder. + # CLI flag: -dataobj-consumer.target-page-size + [target_page_size: | default = 2MiB] + + # The size of the target object to use for the data object builder. + # CLI flag: -dataobj-consumer.target-object-size + [target_object_size: | default = 1GiB] + + # Configures a maximum size for sections, for sections that support it. + # CLI flag: -dataobj-consumer.target-section-size + [target_section_size: | default = 128MiB] + + # The size of the buffer to use for sorting logs. + # CLI flag: -dataobj-consumer.buffer-size + [buffer_size: | default = 16MiB] + + # The prefix to use for the storage bucket. + # CLI flag: -dataobj-consumer.storage-bucket-prefix + [storage_bucket_prefix: | default = "dataobj/"] + dataobj_explorer: # Prefix to use when exploring the bucket. If set, only objects under this # prefix will be visible. @@ -1094,110 +1011,9 @@ dynamodb: # CLI flag: -dynamodb.kms-key-id [kms_key_id: | default = ""] -# S3 endpoint URL with escaped Key and Secret encoded. If only region is -# specified as a host, proper endpoint will be deduced. Use -# inmemory:/// to use a mock in-memory implementation. -# CLI flag: -s3.url -[s3: ] - -# Set this to `true` to force the request to use path-style addressing. -# CLI flag: -s3.force-path-style -[s3forcepathstyle: | default = false] - -# Comma separated list of bucket names to evenly distribute chunks over. -# Overrides any buckets specified in s3.url flag -# CLI flag: -s3.buckets -[bucketnames: | default = ""] - -# S3 Endpoint to connect to. -# CLI flag: -s3.endpoint -[endpoint: | default = ""] - -# AWS region to use. -# CLI flag: -s3.region -[region: | default = ""] - -# AWS Access Key ID -# CLI flag: -s3.access-key-id -[access_key_id: | default = ""] - -# AWS Secret Access Key -# CLI flag: -s3.secret-access-key -[secret_access_key: | default = ""] - -# AWS Session Token -# CLI flag: -s3.session-token -[session_token: | default = ""] - -# Disable https on s3 connection. -# CLI flag: -s3.insecure -[insecure: | default = false] - -http_config: - # Timeout specifies a time limit for requests made by s3 Client. - # CLI flag: -s3.http.timeout - [timeout: | default = 0s] - - # The maximum amount of time an idle connection will be held open. - # CLI flag: -s3.http.idle-conn-timeout - [idle_conn_timeout: | default = 1m30s] - - # If non-zero, specifies the amount of time to wait for a server's response - # headers after fully writing the request. - # CLI flag: -s3.http.response-header-timeout - [response_header_timeout: | default = 0s] - - # Set to true to skip verifying the certificate chain and hostname. - # CLI flag: -s3.http.insecure-skip-verify - [insecure_skip_verify: | default = false] - - # Path to the trusted CA file that signed the SSL certificate of the S3 - # endpoint. - # CLI flag: -s3.http.ca-file - [ca_file: | default = ""] - -# The signature version to use for authenticating against S3. Supported values -# are: v4. 
-# CLI flag: -s3.signature-version -[signature_version: | default = "v4"] - -# The S3 storage class which objects will use. Supported values are: GLACIER, -# DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, -# REDUCED_REDUNDANCY, STANDARD, STANDARD_IA. -# CLI flag: -s3.storage-class -[storage_class: | default = "STANDARD"] - -sse: - # Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3. - # CLI flag: -s3.sse.type - [type: | default = ""] - - # KMS Key ID used to encrypt objects in S3 - # CLI flag: -s3.sse.kms-key-id - [kms_key_id: | default = ""] - - # KMS Encryption Context used for object encryption. It expects JSON formatted - # string. - # CLI flag: -s3.sse.kms-encryption-context - [kms_encryption_context: | default = ""] - -# Configures back off when S3 get Object. -backoff_config: - # Minimum backoff time when s3 get Object - # CLI flag: -s3.min-backoff - [min_period: | default = 100ms] - - # Maximum backoff time when s3 get Object - # CLI flag: -s3.max-backoff - [max_period: | default = 3s] - - # Maximum number of times to retry for s3 GetObject or ObjectExists - # CLI flag: -s3.max-retries - [max_retries: | default = 5] - -# Disable forcing S3 dualstack endpoint usage. -# CLI flag: -s3.disable-dualstack -[disable_dualstack: | default = false] +# The s3_storage_config block configures the connection to Amazon S3 object +# storage backend. +[] ``` ### azure_storage_config @@ -1555,67 +1371,10 @@ memcached_client: # CLI flag: -.memcached.tls-enabled [tls_enabled: | default = false] - # Path to the client certificate, which will be used for authenticating with - # the server. Also requires the key path to be configured. - # CLI flag: -.memcached.tls-cert-path - [tls_cert_path: | default = ""] - - # Path to the key for the client certificate. Also requires the client - # certificate to be configured. - # CLI flag: -.memcached.tls-key-path - [tls_key_path: | default = ""] - - # Path to the CA certificates to validate server certificate against. If not - # set, the host's root CA certificates are used. - # CLI flag: -.memcached.tls-ca-path - [tls_ca_path: | default = ""] - - # Override the expected name on the server certificate. - # CLI flag: -.memcached.tls-server-name - [tls_server_name: | default = ""] - - # Skip validating server certificate. - # CLI flag: -.memcached.tls-insecure-skip-verify - [tls_insecure_skip_verify: | default = false] - - # Override the default cipher suite list (separated by commas). 
Allowed - # values: - # - # Secure Ciphers: - # - TLS_AES_128_GCM_SHA256 - # - TLS_AES_256_GCM_SHA384 - # - TLS_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - # - # Insecure Ciphers: - # - TLS_RSA_WITH_RC4_128_SHA - # - TLS_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA - # - TLS_RSA_WITH_AES_256_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA256 - # - TLS_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - # CLI flag: -.memcached.tls-cipher-suites - [tls_cipher_suites: | default = ""] - - # Override the default minimum TLS version. Allowed values: VersionTLS10, - # VersionTLS11, VersionTLS12, VersionTLS13 - # CLI flag: -.memcached.tls-min-version - [tls_min_version: | default = ""] + # The TLS configuration. + # The CLI flags prefix for this block configuration is: + # store.index-cache-write.memcached + [] redis: # Redis Server or Cluster configuration endpoint to use for caching. A @@ -1744,7 +1503,7 @@ Common configuration to be shared between multiple modules. If a more specific c storage: # The s3_storage_config block configures the connection to Amazon S3 object # storage backend. - # The CLI flags prefix for this block configuration is: common + # The CLI flags prefix for this block configuration is: common.storage [s3: ] # The gcs_storage_config block configures the connection to Google Cloud @@ -1849,6 +1608,15 @@ storage: # CLI flag: -common.storage.congestion-control.hedge.strategy [strategy: | default = ""] + # The thanos_object_store_config block configures the connection to object + # storage backend using thanos-io/objstore clients. This will become the + # default way of configuring object store clients in future releases. + # Currently this is opt-in and takes effect only when `-use-thanos-objstore` + # is set to true. + # The CLI flags prefix for this block configuration is: + # common.storage.object-store + [object_store: ] + [persist_tokens: ] [replication_factor: ] @@ -2424,66 +2192,9 @@ Configuration for an ETCD v3 client. Only applies if the selected kvstore is `et # CLI flag: -.etcd.tls-enabled [tls_enabled: | default = false] -# Path to the client certificate, which will be used for authenticating with the -# server. Also requires the key path to be configured. -# CLI flag: -.etcd.tls-cert-path -[tls_cert_path: | default = ""] - -# Path to the key for the client certificate. Also requires the client -# certificate to be configured. -# CLI flag: -.etcd.tls-key-path -[tls_key_path: | default = ""] - -# Path to the CA certificates to validate server certificate against. If not -# set, the host's root CA certificates are used. -# CLI flag: -.etcd.tls-ca-path -[tls_ca_path: | default = ""] - -# Override the expected name on the server certificate. -# CLI flag: -.etcd.tls-server-name -[tls_server_name: | default = ""] - -# Skip validating server certificate. 
-# CLI flag: -.etcd.tls-insecure-skip-verify -[tls_insecure_skip_verify: | default = false] - -# Override the default cipher suite list (separated by commas). Allowed values: -# -# Secure Ciphers: -# - TLS_AES_128_GCM_SHA256 -# - TLS_AES_256_GCM_SHA384 -# - TLS_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 -# -# Insecure Ciphers: -# - TLS_RSA_WITH_RC4_128_SHA -# - TLS_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA -# - TLS_RSA_WITH_AES_256_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA256 -# - TLS_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -# CLI flag: -.etcd.tls-cipher-suites -[tls_cipher_suites: | default = ""] - -# Override the default minimum TLS version. Allowed values: VersionTLS10, -# VersionTLS11, VersionTLS12, VersionTLS13 -# CLI flag: -.etcd.tls-min-version -[tls_min_version: | default = ""] +# The TLS configuration. +# The CLI flags prefix for this block configuration is: ruler.ring.etcd +[] # Etcd username. # CLI flag: -.etcd.username @@ -2693,6 +2404,7 @@ The `grpc_client` block configures the gRPC client used to communicate between a - `bloom-build.builder.grpc` - `bloom-gateway-client.grpc` - `boltdb.shipper.index-gateway-client.grpc` +- `compactor.grpc-client` - `frontend.grpc-client-config` - `ingester.client` - `pattern-ingester.client` @@ -2762,66 +2474,10 @@ backoff_config: # CLI flag: -.tls-enabled [tls_enabled: | default = false] -# Path to the client certificate, which will be used for authenticating with the -# server. Also requires the key path to be configured. -# CLI flag: -.tls-cert-path -[tls_cert_path: | default = ""] - -# Path to the key for the client certificate. Also requires the client -# certificate to be configured. -# CLI flag: -.tls-key-path -[tls_key_path: | default = ""] - -# Path to the CA certificates to validate server certificate against. If not -# set, the host's root CA certificates are used. -# CLI flag: -.tls-ca-path -[tls_ca_path: | default = ""] - -# Override the expected name on the server certificate. -# CLI flag: -.tls-server-name -[tls_server_name: | default = ""] - -# Skip validating server certificate. -# CLI flag: -.tls-insecure-skip-verify -[tls_insecure_skip_verify: | default = false] - -# Override the default cipher suite list (separated by commas). 
Allowed values: -# -# Secure Ciphers: -# - TLS_AES_128_GCM_SHA256 -# - TLS_AES_256_GCM_SHA384 -# - TLS_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 -# -# Insecure Ciphers: -# - TLS_RSA_WITH_RC4_128_SHA -# - TLS_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA -# - TLS_RSA_WITH_AES_256_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA256 -# - TLS_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -# CLI flag: -.tls-cipher-suites -[tls_cipher_suites: | default = ""] - -# Override the default minimum TLS version. Allowed values: VersionTLS10, -# VersionTLS11, VersionTLS12, VersionTLS13 -# CLI flag: -.tls-min-version -[tls_min_version: | default = ""] +# The TLS configuration. +# The CLI flags prefix for this block configuration is: +# tsdb.shipper.index-gateway-client.grpc +[] # The maximum amount of time to establish a connection. A value of 0 means # default gRPC client connect timeout and backoff. @@ -4142,66 +3798,9 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type # CLI flag: -memberlist.tls-enabled [tls_enabled: | default = false] -# Path to the client certificate, which will be used for authenticating with the -# server. Also requires the key path to be configured. -# CLI flag: -memberlist.tls-cert-path -[tls_cert_path: | default = ""] - -# Path to the key for the client certificate. Also requires the client -# certificate to be configured. -# CLI flag: -memberlist.tls-key-path -[tls_key_path: | default = ""] - -# Path to the CA certificates to validate server certificate against. If not -# set, the host's root CA certificates are used. -# CLI flag: -memberlist.tls-ca-path -[tls_ca_path: | default = ""] - -# Override the expected name on the server certificate. -# CLI flag: -memberlist.tls-server-name -[tls_server_name: | default = ""] - -# Skip validating server certificate. -# CLI flag: -memberlist.tls-insecure-skip-verify -[tls_insecure_skip_verify: | default = false] - -# Override the default cipher suite list (separated by commas). 
Allowed values: -# -# Secure Ciphers: -# - TLS_AES_128_GCM_SHA256 -# - TLS_AES_256_GCM_SHA384 -# - TLS_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA -# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 -# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 -# -# Insecure Ciphers: -# - TLS_RSA_WITH_RC4_128_SHA -# - TLS_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA -# - TLS_RSA_WITH_AES_256_CBC_SHA -# - TLS_RSA_WITH_AES_128_CBC_SHA256 -# - TLS_RSA_WITH_AES_128_GCM_SHA256 -# - TLS_RSA_WITH_AES_256_GCM_SHA384 -# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_RC4_128_SHA -# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA -# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 -# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -# CLI flag: -memberlist.tls-cipher-suites -[tls_cipher_suites: | default = ""] - -# Override the default minimum TLS version. Allowed values: VersionTLS10, -# VersionTLS11, VersionTLS12, VersionTLS13 -# CLI flag: -memberlist.tls-min-version -[tls_min_version: | default = ""] +# The TLS configuration. +# The CLI flags prefix for this block configuration is: memberlist +[] ``` ### named_stores_config @@ -4708,7 +4307,7 @@ storage: [gcs: ] # Configures backend rule storage for S3. - # The CLI flags prefix for this block configuration is: ruler + # The CLI flags prefix for this block configuration is: ruler.storage [s3: ] # Configures backend rule storage for Baidu Object Storage (BOS). @@ -4765,67 +4364,10 @@ storage: [notification_timeout: | default = 10s] alertmanager_client: - # Path to the client certificate, which will be used for authenticating with - # the server. Also requires the key path to be configured. - # CLI flag: -ruler.alertmanager-client.tls-cert-path - [tls_cert_path: | default = ""] - - # Path to the key for the client certificate. Also requires the client - # certificate to be configured. - # CLI flag: -ruler.alertmanager-client.tls-key-path - [tls_key_path: | default = ""] - - # Path to the CA certificates to validate server certificate against. If not - # set, the host's root CA certificates are used. - # CLI flag: -ruler.alertmanager-client.tls-ca-path - [tls_ca_path: | default = ""] - - # Override the expected name on the server certificate. - # CLI flag: -ruler.alertmanager-client.tls-server-name - [tls_server_name: | default = ""] - - # Skip validating server certificate. - # CLI flag: -ruler.alertmanager-client.tls-insecure-skip-verify - [tls_insecure_skip_verify: | default = false] - - # Override the default cipher suite list (separated by commas). 
Allowed - # values: - # - # Secure Ciphers: - # - TLS_AES_128_GCM_SHA256 - # - TLS_AES_256_GCM_SHA384 - # - TLS_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - # - # Insecure Ciphers: - # - TLS_RSA_WITH_RC4_128_SHA - # - TLS_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA - # - TLS_RSA_WITH_AES_256_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA256 - # - TLS_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - # CLI flag: -ruler.alertmanager-client.tls-cipher-suites - [tls_cipher_suites: | default = ""] - - # Override the default minimum TLS version. Allowed values: VersionTLS10, - # VersionTLS11, VersionTLS12, VersionTLS13 - # CLI flag: -ruler.alertmanager-client.tls-min-version - [tls_min_version: | default = ""] + # The TLS configuration. + # The CLI flags prefix for this block configuration is: + # ruler.alertmanager-client + [] # HTTP Basic authentication username. It overrides the username set in the URL # (if any). @@ -5047,67 +4589,10 @@ evaluation: # CLI flag: -ruler.evaluation.query-frontend.tls-enabled [tls_enabled: | default = false] - # Path to the client certificate, which will be used for authenticating with - # the server. Also requires the key path to be configured. - # CLI flag: -ruler.evaluation.query-frontend.tls-cert-path - [tls_cert_path: | default = ""] - - # Path to the key for the client certificate. Also requires the client - # certificate to be configured. - # CLI flag: -ruler.evaluation.query-frontend.tls-key-path - [tls_key_path: | default = ""] - - # Path to the CA certificates to validate server certificate against. If not - # set, the host's root CA certificates are used. - # CLI flag: -ruler.evaluation.query-frontend.tls-ca-path - [tls_ca_path: | default = ""] - - # Override the expected name on the server certificate. - # CLI flag: -ruler.evaluation.query-frontend.tls-server-name - [tls_server_name: | default = ""] - - # Skip validating server certificate. - # CLI flag: -ruler.evaluation.query-frontend.tls-insecure-skip-verify - [tls_insecure_skip_verify: | default = false] - - # Override the default cipher suite list (separated by commas). 
Allowed - # values: - # - # Secure Ciphers: - # - TLS_AES_128_GCM_SHA256 - # - TLS_AES_256_GCM_SHA384 - # - TLS_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA - # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - # - # Insecure Ciphers: - # - TLS_RSA_WITH_RC4_128_SHA - # - TLS_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA - # - TLS_RSA_WITH_AES_256_CBC_SHA - # - TLS_RSA_WITH_AES_128_CBC_SHA256 - # - TLS_RSA_WITH_AES_128_GCM_SHA256 - # - TLS_RSA_WITH_AES_256_GCM_SHA384 - # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_RC4_128_SHA - # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 - # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - # CLI flag: -ruler.evaluation.query-frontend.tls-cipher-suites - [tls_cipher_suites: | default = ""] - - # Override the default minimum TLS version. Allowed values: VersionTLS10, - # VersionTLS11, VersionTLS12, VersionTLS13 - # CLI flag: -ruler.evaluation.query-frontend.tls-min-version - [tls_min_version: | default = ""] + # The TLS configuration. + # The CLI flags prefix for this block configuration is: + # ruler.evaluation.query-frontend + [] ``` ### runtime_config @@ -5129,8 +4614,8 @@ Configuration for 'runtime config' module, responsible for reloading runtime con The `s3_storage_config` block configures the connection to Amazon S3 object storage backend. The supported CLI flags `` used to reference this configuration block are: -- `common` -- `ruler` +- `common.storage` +- `ruler.storage`   @@ -5138,106 +4623,106 @@ The `s3_storage_config` block configures the connection to Amazon S3 object stor # S3 endpoint URL with escaped Key and Secret encoded. If only region is # specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. -# CLI flag: -.storage.s3.url +# CLI flag: -.s3.url [s3: ] # Set this to `true` to force the request to use path-style addressing. -# CLI flag: -.storage.s3.force-path-style +# CLI flag: -.s3.force-path-style [s3forcepathstyle: | default = false] # Comma separated list of bucket names to evenly distribute chunks over. # Overrides any buckets specified in s3.url flag -# CLI flag: -.storage.s3.buckets +# CLI flag: -.s3.buckets [bucketnames: | default = ""] # S3 Endpoint to connect to. -# CLI flag: -.storage.s3.endpoint +# CLI flag: -.s3.endpoint [endpoint: | default = ""] # AWS region to use. -# CLI flag: -.storage.s3.region +# CLI flag: -.s3.region [region: | default = ""] # AWS Access Key ID -# CLI flag: -.storage.s3.access-key-id +# CLI flag: -.s3.access-key-id [access_key_id: | default = ""] # AWS Secret Access Key -# CLI flag: -.storage.s3.secret-access-key +# CLI flag: -.s3.secret-access-key [secret_access_key: | default = ""] # AWS Session Token -# CLI flag: -.storage.s3.session-token +# CLI flag: -.s3.session-token [session_token: | default = ""] # Disable https on s3 connection. -# CLI flag: -.storage.s3.insecure +# CLI flag: -.s3.insecure [insecure: | default = false] http_config: # Timeout specifies a time limit for requests made by s3 Client. 
- # CLI flag: -.storage.s3.http.timeout + # CLI flag: -.s3.http.timeout [timeout: | default = 0s] # The maximum amount of time an idle connection will be held open. - # CLI flag: -.storage.s3.http.idle-conn-timeout + # CLI flag: -.s3.http.idle-conn-timeout [idle_conn_timeout: | default = 1m30s] # If non-zero, specifies the amount of time to wait for a server's response # headers after fully writing the request. - # CLI flag: -.storage.s3.http.response-header-timeout + # CLI flag: -.s3.http.response-header-timeout [response_header_timeout: | default = 0s] # Set to true to skip verifying the certificate chain and hostname. - # CLI flag: -.storage.s3.http.insecure-skip-verify + # CLI flag: -.s3.http.insecure-skip-verify [insecure_skip_verify: | default = false] # Path to the trusted CA file that signed the SSL certificate of the S3 # endpoint. - # CLI flag: -.storage.s3.http.ca-file + # CLI flag: -.s3.http.ca-file [ca_file: | default = ""] # The signature version to use for authenticating against S3. Supported values # are: v4. -# CLI flag: -.storage.s3.signature-version +# CLI flag: -.s3.signature-version [signature_version: | default = "v4"] # The S3 storage class which objects will use. Supported values are: GLACIER, # DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, # REDUCED_REDUNDANCY, STANDARD, STANDARD_IA. -# CLI flag: -.storage.s3.storage-class +# CLI flag: -.s3.storage-class [storage_class: | default = "STANDARD"] sse: # Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3. - # CLI flag: -.storage.s3.sse.type + # CLI flag: -.s3.sse.type [type: | default = ""] # KMS Key ID used to encrypt objects in S3 - # CLI flag: -.storage.s3.sse.kms-key-id + # CLI flag: -.s3.sse.kms-key-id [kms_key_id: | default = ""] # KMS Encryption Context used for object encryption. It expects JSON formatted # string. - # CLI flag: -.storage.s3.sse.kms-encryption-context + # CLI flag: -.s3.sse.kms-encryption-context [kms_encryption_context: | default = ""] # Configures back off when S3 get Object. backoff_config: # Minimum backoff time when s3 get Object - # CLI flag: -.storage.s3.min-backoff + # CLI flag: -.s3.min-backoff [min_period: | default = 100ms] # Maximum backoff time when s3 get Object - # CLI flag: -.storage.s3.max-backoff + # CLI flag: -.s3.max-backoff [max_period: | default = 3s] # Maximum number of times to retry for s3 GetObject or ObjectExists - # CLI flag: -.storage.s3.max-retries + # CLI flag: -.s3.max-retries [max_retries: | default = 5] # Disable forcing S3 dualstack endpoint usage. -# CLI flag: -.storage.s3.disable-dualstack +# CLI flag: -.s3.disable-dualstack [disable_dualstack: | default = false] ``` @@ -5800,6 +5285,33 @@ congestion_control: # CLI flag: -store.max-parallel-get-chunk [max_parallel_get_chunk: | default = 150] +# Enables the use of thanos-io/objstore clients for connecting to object +# storage. When set to true, the configuration inside +# `storage_config.object_store` or `common.storage.object_store` block takes +# effect. +# CLI flag: -use-thanos-objstore +[use_thanos_objstore: | default = false] + +object_store: + # The thanos_object_store_config block configures the connection to object + # storage backend using thanos-io/objstore clients. This will become the + # default way of configuring object store clients in future releases. + # Currently this is opt-in and takes effect only when `-use-thanos-objstore` + # is set to true. 
+ # The CLI flags prefix for this block configuration is: object-store + [] + + named_stores: + [azure: ] + + [filesystem: ] + + [gcs: ] + + [s3: ] + + [swift: ] + # The maximum number of chunks to fetch per batch. # CLI flag: -store.max-chunk-batch-size [max_chunk_batch_size: | default = 50] @@ -6403,12 +5915,481 @@ chunk_tables_provisioning: [inactive_read_scale_lastn: | default = 4] ``` +### thanos_object_store_config + +The `thanos_object_store_config` block configures the connection to object storage backend using thanos-io/objstore clients. This will become the default way of configuring object store clients in future releases. +Currently this is opt-in and takes effect only when `-use-thanos-objstore` is set to true. The supported CLI flags `` used to reference this configuration block are: + +- `common.storage.object-store` +- `object-store` +- `ruler-storage` + +  + +```yaml +s3: + # The S3 bucket endpoint. It could be an AWS S3 endpoint listed at + # https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an + # S3-compatible service in hostname:port format. + # CLI flag: -.s3.endpoint + [endpoint: | default = ""] + + # S3 region. If unset, the client will issue a S3 GetBucketLocation API call + # to autodetect it. + # CLI flag: -.s3.region + [region: | default = ""] + + # S3 bucket name + # CLI flag: -.s3.bucket-name + [bucket_name: | default = ""] + + # S3 secret access key + # CLI flag: -.s3.secret-access-key + [secret_access_key: | default = ""] + + # S3 access key ID + # CLI flag: -.s3.access-key-id + [access_key_id: | default = ""] + + # S3 session token + # CLI flag: -.s3.session-token + [session_token: | default = ""] + + # If enabled, use http:// for the S3 endpoint instead of https://. This could + # be useful in local dev/test environments while using an S3-compatible + # backend storage, like Minio. + # CLI flag: -.s3.insecure + [insecure: | default = false] + + # Use a specific version of the S3 list object API. Supported values are v1 or + # v2. Default is unset. + # CLI flag: -.s3.list-objects-version + [list_objects_version: | default = ""] + + # Bucket lookup style type, used to access bucket in S3-compatible service. + # Default is auto. Supported values are: auto, path, virtual-hosted. + # CLI flag: -.s3.bucket-lookup-type + [bucket_lookup_type: | default = auto] + + # When enabled, direct all AWS S3 requests to the dual-stack IPv4/IPv6 + # endpoint for the configured region. + # CLI flag: -.s3.dualstack-enabled + [dualstack_enabled: | default = true] + + # The S3 storage class to use, not set by default. Details can be found at + # https://aws.amazon.com/s3/storage-classes/. Supported values are: STANDARD, + # REDUCED_REDUNDANCY, GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + # DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE + # CLI flag: -.s3.storage-class + [storage_class: | default = ""] + + # If enabled, it will use the default authentication methods of the AWS SDK + # for go based on known environment variables and known AWS config files. + # CLI flag: -.s3.native-aws-auth-enabled + [native_aws_auth_enabled: | default = false] + + # The minimum file size in bytes used for multipart uploads. If 0, the value + # is optimally computed for each object. + # CLI flag: -.s3.part-size + [part_size: | default = 0] + + # If enabled, a Content-MD5 header is sent with S3 Put Object requests. + # Consumes more resources to compute the MD5, but may improve compatibility + # with object storage services that do not support checksums. 
+ # CLI flag: -.s3.send-content-md5 + [send_content_md5: | default = false] + + # Accessing S3 resources using temporary, secure credentials provided by AWS + # Security Token Service. + # CLI flag: -.s3.sts-endpoint + [sts_endpoint: | default = ""] + + # The maximum number of retries for S3 requests that are retryable. Default is + # 10, set this to 1 to disable retries. + # CLI flag: -.s3.max-retries + [max_retries: | default = 10] + + sse: + # Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3. + # CLI flag: -.s3.sse.type + [type: | default = ""] + + # KMS Key ID used to encrypt objects in S3 + # CLI flag: -.s3.sse.kms-key-id + [kms_key_id: | default = ""] + + # KMS Encryption Context used for object encryption. It expects JSON + # formatted string. + # CLI flag: -.s3.sse.kms-encryption-context + [kms_encryption_context: | default = ""] + + http: + # The time an idle connection will remain idle before closing. + # CLI flag: -.s3.http.idle-conn-timeout + [idle_conn_timeout: | default = 1m30s] + + # The amount of time the client will wait for a servers response headers. + # CLI flag: -.s3.http.response-header-timeout + [response_header_timeout: | default = 2m] + + # If the client connects via HTTPS and this option is enabled, the client + # will accept any certificate and hostname. + # CLI flag: -.s3.http.insecure-skip-verify + [insecure_skip_verify: | default = false] + + # Maximum time to wait for a TLS handshake. 0 means no limit. + # CLI flag: -.s3.tls-handshake-timeout + [tls_handshake_timeout: | default = 10s] + + # The time to wait for a server's first response headers after fully writing + # the request headers if the request has an Expect header. 0 to send the + # request body immediately. + # CLI flag: -.s3.expect-continue-timeout + [expect_continue_timeout: | default = 1s] + + # Maximum number of idle (keep-alive) connections across all hosts. 0 means + # no limit. + # CLI flag: -.s3.max-idle-connections + [max_idle_connections: | default = 100] + + # Maximum number of idle (keep-alive) connections to keep per-host. If 0, a + # built-in default value is used. + # CLI flag: -.s3.max-idle-connections-per-host + [max_idle_connections_per_host: | default = 100] + + # Maximum number of connections per host. 0 means no limit. + # CLI flag: -.s3.max-connections-per-host + [max_connections_per_host: | default = 0] + + # Path to the CA certificates to validate server certificate against. If not + # set, the host's root CA certificates are used. + # CLI flag: -.s3.http.tls-ca-path + [tls_ca_path: | default = ""] + + # Path to the client certificate, which will be used for authenticating with + # the server. Also requires the key path to be configured. + # CLI flag: -.s3.http.tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key for the client certificate. Also requires the client + # certificate to be configured. + # CLI flag: -.s3.http.tls-key-path + [tls_key_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -.s3.http.tls-server-name + [tls_server_name: | default = ""] + + trace: + # When enabled, low-level S3 HTTP operation information is logged at the + # debug level. + # CLI flag: -.s3.trace.enabled + [enabled: | default = false] + +gcs: + # GCS bucket name + # CLI flag: -.gcs.bucket-name + [bucket_name: | default = ""] + + # JSON either from a Google Developers Console client_credentials.json file, + # or a Google Developers service account key. Needs to be valid JSON, not a + # filesystem path. 
If empty, fallback to Google default logic: + # 1. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS + # environment variable. For workload identity federation, refer to + # https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation + # on how to generate the JSON configuration file for on-prem/non-Google cloud + # platforms. + # 2. A JSON file in a location known to the gcloud command-line tool: + # $HOME/.config/gcloud/application_default_credentials.json. + # 3. On Google Compute Engine it fetches credentials from the metadata server. + # CLI flag: -.gcs.service-account + [service_account: | default = ""] + + # The maximum size of the buffer that GCS client for a single PUT request. 0 + # to disable buffering. + # CLI flag: -.gcs.chunk-buffer-size + [chunk_buffer_size: | default = 0] + + # The maximum number of retries for idempotent operations. Overrides the + # default gcs storage client behavior if this value is greater than 0. Set + # this to 1 to disable retries. + # CLI flag: -.gcs.max-retries + [max_retries: | default = 10] + +azure: + # Azure storage account name + # CLI flag: -.azure.account-name + [account_name: | default = ""] + + # Azure storage account key. If unset, Azure managed identities will be used + # for authentication instead. + # CLI flag: -.azure.account-key + [account_key: | default = ""] + + # If `connection-string` is set, the value of `endpoint-suffix` will not be + # used. Use this method over `account-key` if you need to authenticate via a + # SAS token. Or if you use the Azurite emulator. + # CLI flag: -.azure.connection-string + [connection_string: | default = ""] + + # Azure storage container name + # CLI flag: -.azure.container-name + [container_name: | default = ""] + + # Azure storage endpoint suffix without schema. The account name will be + # prefixed to this value to create the FQDN. If set to empty string, default + # endpoint suffix is used. + # CLI flag: -.azure.endpoint-suffix + [endpoint_suffix: | default = ""] + + # Number of retries for recoverable errors + # CLI flag: -.azure.max-retries + [max_retries: | default = 20] + + # User assigned managed identity. If empty, then System assigned identity is + # used. + # CLI flag: -.azure.user-assigned-id + [user_assigned_id: | default = ""] + + # Delimiter used to replace ':' in chunk IDs when storing chunks + # CLI flag: -.azure.chunk-delimiter + [chunk_delimiter: | default = "-"] + +swift: + # OpenStack Swift application credential id + # CLI flag: -.swift.application-credential-id + [application_credential_id: | default = ""] + + # OpenStack Swift application credential name + # CLI flag: -.swift.application-credential-name + [application_credential_name: | default = ""] + + # OpenStack Swift application credential secret + # CLI flag: -.swift.application-credential-secret + [application_credential_secret: | default = ""] + + # OpenStack Swift authentication API version. 0 to autodetect. + # CLI flag: -.swift.auth-version + [auth_version: | default = 0] + + # OpenStack Swift authentication URL + # CLI flag: -.swift.auth-url + [auth_url: | default = ""] + + # OpenStack Swift username. + # CLI flag: -.swift.username + [username: | default = ""] + + # OpenStack Swift user's domain name. + # CLI flag: -.swift.user-domain-name + [user_domain_name: | default = ""] + + # OpenStack Swift user's domain ID. + # CLI flag: -.swift.user-domain-id + [user_domain_id: | default = ""] + + # OpenStack Swift user ID. 
+ # CLI flag: -.swift.user-id + [user_id: | default = ""] + + # OpenStack Swift API key. + # CLI flag: -.swift.password + [password: | default = ""] + + # OpenStack Swift user's domain ID. + # CLI flag: -.swift.domain-id + [domain_id: | default = ""] + + # OpenStack Swift user's domain name. + # CLI flag: -.swift.domain-name + [domain_name: | default = ""] + + # OpenStack Swift project ID (v2,v3 auth only). + # CLI flag: -.swift.project-id + [project_id: | default = ""] + + # OpenStack Swift project name (v2,v3 auth only). + # CLI flag: -.swift.project-name + [project_name: | default = ""] + + # ID of the OpenStack Swift project's domain (v3 auth only), only needed if it + # differs the from user domain. + # CLI flag: -.swift.project-domain-id + [project_domain_id: | default = ""] + + # Name of the OpenStack Swift project's domain (v3 auth only), only needed if + # it differs from the user domain. + # CLI flag: -.swift.project-domain-name + [project_domain_name: | default = ""] + + # OpenStack Swift Region to use (v2,v3 auth only). + # CLI flag: -.swift.region-name + [region_name: | default = ""] + + # Name of the OpenStack Swift container to put chunks in. + # CLI flag: -.swift.container-name + [container_name: | default = ""] + + # Max retries on requests error. + # CLI flag: -.swift.max-retries + [max_retries: | default = 3] + + # Time after which a connection attempt is aborted. + # CLI flag: -.swift.connect-timeout + [connect_timeout: | default = 10s] + + # Time after which an idle request is aborted. The timeout watchdog is reset + # each time some data is received, so the timeout triggers after X time no + # data is received on a request. + # CLI flag: -.swift.request-timeout + [request_timeout: | default = 5s] + + http: + # The time an idle connection will remain idle before closing. + # CLI flag: -.swift.http.idle-conn-timeout + [idle_conn_timeout: | default = 1m30s] + + # The amount of time the client will wait for a servers response headers. + # CLI flag: -.swift.http.response-header-timeout + [response_header_timeout: | default = 2m] + + # If the client connects via HTTPS and this option is enabled, the client + # will accept any certificate and hostname. + # CLI flag: -.swift.http.insecure-skip-verify + [insecure_skip_verify: | default = false] + + # Maximum time to wait for a TLS handshake. 0 means no limit. + # CLI flag: -.swift.tls-handshake-timeout + [tls_handshake_timeout: | default = 10s] + + # The time to wait for a server's first response headers after fully writing + # the request headers if the request has an Expect header. 0 to send the + # request body immediately. + # CLI flag: -.swift.expect-continue-timeout + [expect_continue_timeout: | default = 1s] + + # Maximum number of idle (keep-alive) connections across all hosts. 0 means + # no limit. + # CLI flag: -.swift.max-idle-connections + [max_idle_connections: | default = 100] + + # Maximum number of idle (keep-alive) connections to keep per-host. If 0, a + # built-in default value is used. + # CLI flag: -.swift.max-idle-connections-per-host + [max_idle_connections_per_host: | default = 100] + + # Maximum number of connections per host. 0 means no limit. + # CLI flag: -.swift.max-connections-per-host + [max_connections_per_host: | default = 0] + + # Path to the CA certificates to validate server certificate against. If not + # set, the host's root CA certificates are used. 
### tls_config

The TLS configuration. The supported CLI flags `<prefix>` used to reference
this configuration block are:

+- `bigtable`
+- `blockbuilder.scheduler-grpc-client`
+- `bloom-build.builder.grpc`
+- `bloom-gateway-client.grpc`
+- `bloom.metas-cache.memcached`
+- `boltdb.shipper.index-gateway-client.grpc`
+- `common.storage.ring.etcd`
+- `compactor.grpc-client`
+- `compactor.ring.etcd`
+- `distributor.ring.etcd`
+- `etcd`
+- `frontend.grpc-client-config`
+- `frontend.index-stats-results-cache.memcached`
+- `frontend.instant-metric-results-cache.memcached`
+- `frontend.label-results-cache.memcached`
+- `frontend.memcached`
+- `frontend.series-results-cache.memcached`
 - `frontend.tail-tls-config`
+- `frontend.volume-results-cache.memcached`
+- `index-gateway.ring.etcd`
+- `ingester.client`
+- `ingester.partition-ring.etcd`
+- `memberlist`
+- `pattern-ingester.client`
+- `pattern-ingester.etcd`
+- `querier.frontend-client`
+- `querier.frontend-grpc-client`
+- `querier.scheduler-grpc-client`
+- `query-scheduler.grpc-client-config`
+- `query-scheduler.ring.etcd`
 - `reporting.tls-config`
+- `ruler.alertmanager-client`
+- `ruler.client`
+- `ruler.evaluation.query-frontend`
+- `ruler.ring.etcd`
+- `store.chunks-cache-l2.memcached`
+- `store.chunks-cache.memcached`
+- `store.index-cache-read.memcached`
+- `store.index-cache-write.memcached`
+- `tsdb.shipper.index-gateway-client.grpc`

diff --git a/go.mod b/go.mod
index be67a0b5d664f..f6b7dedfb55b7 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ toolchain go1.23.1
 require (
 	cloud.google.com/go/bigtable v1.35.0
-	cloud.google.com/go/pubsub v1.46.0
+	cloud.google.com/go/pubsub v1.47.0
 	cloud.google.com/go/storage v1.50.0
 	dario.cat/mergo v1.0.1
 	github.com/Azure/azure-pipeline-go v0.2.3
@@ -134,7 +134,7 @@ require (
 	github.com/prometheus/common/sigv4 v0.1.0
github.com/richardartoul/molecule v1.0.0 github.com/schollz/progressbar/v3 v3.18.0 - github.com/shirou/gopsutil/v4 v4.24.12 + github.com/shirou/gopsutil/v4 v4.25.1 github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a github.com/twmb/franz-go v1.18.1 github.com/twmb/franz-go/pkg/kadm v1.15.0 @@ -143,7 +143,7 @@ require ( github.com/twmb/franz-go/plugin/kotel v1.5.0 github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/willf/bloom v2.0.3+incompatible - go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/pdata v1.25.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/oauth2 v0.25.0 golang.org/x/text v0.21.0 @@ -166,7 +166,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect github.com/containerd/containerd v1.7.25 // indirect github.com/dlclark/regexp2 v1.11.4 // indirect - github.com/ebitengine/purego v0.8.1 // indirect + github.com/ebitengine/purego v0.8.2 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/go-ini/ini v1.67.0 // indirect @@ -203,7 +203,7 @@ require ( ) require ( - cloud.google.com/go v0.118.0 // indirect + cloud.google.com/go v0.118.1 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect cloud.google.com/go/iam v1.3.1 // indirect cloud.google.com/go/longrunning v0.6.4 // indirect @@ -408,3 +408,5 @@ replace github.com/grafana/loki/pkg/push => ./pkg/push // leodido fork his project to continue support replace github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0 + +replace github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 diff --git a/go.sum b/go.sum index 4e482158459e9..ce878f9db4506 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.118.0 h1:tvZe1mgqRxpiVa3XlIGMiPcEUbP1gNXELgD4y/IXmeQ= -cloud.google.com/go v0.118.0/go.mod h1:zIt2pkedt/mo+DQjcT4/L3NDxzHPR29j5HcclNH+9PM= +cloud.google.com/go v0.118.1 h1:b8RATMcrK9A4BH0rj8yQupPXp+aP+cJ0l6H7V9osV1E= +cloud.google.com/go v0.118.1/go.mod h1:CFO4UPEPi8oV21xoezZCrd3d81K4fFkDTEJu4R8K+9M= cloud.google.com/go/auth v0.14.0 h1:A5C4dKV/Spdvxcl0ggWwWEzzP7AZMJSEIgrkngwhGYM= cloud.google.com/go/auth v0.14.0/go.mod h1:CYsoRL1PdiDuqeQpZE0bP2pnPrGqFcOkI0nldEQis+A= cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= @@ -47,8 +47,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.46.0 h1:C4EzOr0VuaesIFVFoYTcpCmXIyPVDPtvD3DEyn1Sqew= -cloud.google.com/go/pubsub v1.46.0/go.mod h1:qdGehMCX7Qnuoo8EhzKLXgbm3x8a8BVxfm/c06ZnRyE= +cloud.google.com/go/pubsub v1.47.0 h1:Ou2Qu4INnf7ykrFjGv2ntFOjVo8Nloh/+OffF4mUu9w= +cloud.google.com/go/pubsub v1.47.0/go.mod h1:LaENesmga+2u0nDtLkIOILskxsfvn/BXX9Ak1NFxOs8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -356,8 +356,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= -github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I= +github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= @@ -628,6 +628,8 @@ github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3 github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 h1:/y3qC0I9kttHjLPxp4bGf+4jcJw60C6hrokTPckHYT8= +github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= @@ -1065,8 +1067,8 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= -github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= +github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= +github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= @@ -1124,8 +1126,6 @@ github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08Yu github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= 
github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= -github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a h1:wFBHAmtq1tOLPFaiC4LozyG/BzkRa3ZTmVv1KujUNqk= -github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= @@ -1207,8 +1207,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= -go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata v1.25.0 h1:AmgBklQfbfy0lT8qsoJtRuYMZ7ZV3VZvkvhjSDentrg= +go.opentelemetry.io/collector/pdata v1.25.0/go.mod h1:Zs7D4RXOGS7E2faGc/jfWdbmhoiHBxA7QbpuJOioxq8= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/detectors/gcp v1.33.0 h1:FVPoXEoILwgbZUu4X7YSgsESsAmGRgoYcnXkzgQPhP4= diff --git a/operator/api/loki/v1/v1.go b/operator/api/loki/v1/v1.go index 3c5fa54c60621..58d7f9aa987a6 100644 --- a/operator/api/loki/v1/v1.go +++ b/operator/api/loki/v1/v1.go @@ -104,4 +104,6 @@ var ( ErrSummaryAnnotationMissing = errors.New("rule requires annotation: summary") // ErrDescriptionAnnotationMissing indicates that an alerting rule is missing the description annotation ErrDescriptionAnnotationMissing = errors.New("rule requires annotation: description") + // ErrRuleExclusiveNamespaceLabel indicates that a rule must not specify both kubernetes_namespace_name and k8s_namespace_name labels + ErrRuleExclusiveNamespaceLabel = errors.New("rule must not specify both kubernetes_namespace_name and k8s_namespace_name labels") ) diff --git a/operator/internal/validation/openshift/common.go b/operator/internal/validation/openshift/common.go index 7d70c463fa36d..a5aab7704bb8d 100644 --- a/operator/internal/validation/openshift/common.go +++ b/operator/internal/validation/openshift/common.go @@ -65,8 +65,13 @@ func validateRuleExpression(namespace, tenantID, rawExpr string) error { } matchers := selector.Matchers() - if tenantID != tenantAudit && !validateIncludesNamespace(namespace, matchers) { - return lokiv1.ErrRuleMustMatchNamespace + if tenantID != tenantAudit { + if !validateIncludesNamespace(namespace, matchers) { + return lokiv1.ErrRuleMustMatchNamespace + } + if !validateExclusiveNamespaceLabels(matchers) { + return lokiv1.ErrRuleExclusiveNamespaceLabel + } } return nil @@ -82,6 +87,22 @@ func validateIncludesNamespace(namespace string, matchers []*labels.Matcher) boo return false } +func validateExclusiveNamespaceLabels(matchers []*labels.Matcher) bool { + var namespaceLabelSet, otlpLabelSet bool + + for _, m := range matchers { + if m.Name == namespaceLabelName && m.Type == labels.MatchEqual { + namespaceLabelSet = true + } + if m.Name == namespaceOTLPLabelName && m.Type == 
labels.MatchEqual {
+			otlpLabelSet = true
+		}
+	}
+
+	// Exactly one of the two namespace labels must be set; both or neither
+	// fails validation.
+	return (namespaceLabelSet || otlpLabelSet) && !(namespaceLabelSet && otlpLabelSet)
+}
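To make the acceptance rule above concrete, here is a hypothetical snippet (not part of this change) exercising the helper with Prometheus label matchers; it assumes `namespaceLabelName` and `namespaceOTLPLabelName` resolve to `kubernetes_namespace_name` and `k8s_namespace_name` respectively, as the error message suggests:

```go
// Assumed illustration: a selector carrying both namespace labels is
// rejected, while a selector carrying exactly one of them passes.
both := []*labels.Matcher{
	labels.MustNewMatcher(labels.MatchEqual, "kubernetes_namespace_name", "prod"),
	labels.MustNewMatcher(labels.MatchEqual, "k8s_namespace_name", "prod"),
}
only := []*labels.Matcher{
	labels.MustNewMatcher(labels.MatchEqual, "kubernetes_namespace_name", "prod"),
}
fmt.Println(validateExclusiveNamespaceLabels(both)) // false => ErrRuleExclusiveNamespaceLabel
fmt.Println(validateExclusiveNamespaceLabels(only)) // true  => rule accepted
```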
+
 func tenantForNamespace(namespace string) []string {
 	if strings.HasPrefix(namespace, "openshift") ||
 		strings.HasPrefix(namespace, "kube-") ||
diff --git a/pkg/dataobj/builder.go b/pkg/dataobj/builder.go
index ba0391b9298f2..87f668bc4fd2c 100644
--- a/pkg/dataobj/builder.go
+++ b/pkg/dataobj/builder.go
@@ -8,6 +8,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"time"
 
 	"github.com/grafana/dskit/flagext"
 	lru "github.com/hashicorp/golang-lru/v2"
@@ -126,6 +127,12 @@ type Builder struct {
 
 type builderState int
 
+type FlushResult struct {
+	Path         string
+	MinTimestamp time.Time
+	MaxTimestamp time.Time
+}
+
 const (
 	// builderStateReady indicates the builder is empty and ready to accept new data.
 	builderStateEmpty builderState = iota
@@ -285,15 +292,10 @@ func streamSizeEstimate(stream logproto.Stream) int {
 // If Flush builds an object but fails to upload it to object storage, the
 // built object is cached and can be retried. [Builder.Reset] can be called to
 // discard any pending data and allow new data to be appended.
-func (b *Builder) Flush(ctx context.Context) error {
-	switch b.state {
-	case builderStateEmpty:
-		return nil // Nothing to flush
-	case builderStateDirty:
-		if err := b.buildObject(); err != nil {
-			return fmt.Errorf("building object: %w", err)
-		}
-		b.state = builderStateFlush
+func (b *Builder) Flush(ctx context.Context) (FlushResult, error) {
+	buf, err := b.FlushToBuffer()
+	if err != nil {
+		return FlushResult{}, fmt.Errorf("flushing buffer: %w", err)
 	}
 
 	timer := prometheus.NewTimer(b.metrics.flushTime)
@@ -303,12 +305,32 @@
 	sumStr := hex.EncodeToString(sum[:])
 	objectPath := fmt.Sprintf("tenant-%s/objects/%s/%s", b.tenantID, sumStr[:b.cfg.SHAPrefixSize], sumStr[b.cfg.SHAPrefixSize:])
 
-	if err := b.bucket.Upload(ctx, objectPath, bytes.NewReader(b.flushBuffer.Bytes())); err != nil {
-		return err
+	if err := b.bucket.Upload(ctx, objectPath, bytes.NewReader(buf.Bytes())); err != nil {
+		return FlushResult{}, fmt.Errorf("uploading object: %w", err)
 	}
 
+	minTime, maxTime := b.streams.GetBounds()
+
 	b.Reset()
-	return nil
+	return FlushResult{
+		Path:         objectPath,
+		MinTimestamp: minTime,
+		MaxTimestamp: maxTime,
+	}, nil
+}
+
+func (b *Builder) FlushToBuffer() (*bytes.Buffer, error) {
+	switch b.state {
+	case builderStateEmpty:
+		return nil, nil // Nothing to flush
+	case builderStateDirty:
+		if err := b.buildObject(); err != nil {
+			return nil, fmt.Errorf("building object: %w", err)
+		}
+		b.state = builderStateFlush
+	}
+
+	return b.flushBuffer, nil
 }
 
 func (b *Builder) buildObject() error {
@@ -353,6 +375,7 @@ func (b *Builder) Reset() {
 // reg must contain additional labels to differentiate between them.
 func (b *Builder) RegisterMetrics(reg prometheus.Registerer) error {
 	reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg)
+
 	return b.metrics.Register(reg)
 }
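Since Flush now returns a FlushResult instead of just an error, callers get the uploaded object's path and the time range it covers in one step. A hypothetical caller (the `indexObject` helper is assumed for illustration, not part of this change) might look like:

```go
// Sketch only: flush pending streams and index the resulting object.
res, err := builder.Flush(ctx)
if err != nil {
	return fmt.Errorf("flushing data object: %w", err)
}
// res.Path has the form "tenant-<id>/objects/<sha prefix>/<sha rest>";
// MinTimestamp/MaxTimestamp bound the flushed streams.
if err := indexObject(ctx, res.Path, res.MinTimestamp, res.MaxTimestamp); err != nil {
	return err
}
```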
diff --git a/pkg/dataobj/builder_test.go b/pkg/dataobj/builder_test.go
index 365f6a5d6196f..ff86e8dcb941c 100644
--- a/pkg/dataobj/builder_test.go
+++ b/pkg/dataobj/builder_test.go
@@ -81,7 +81,8 @@ func TestBuilder(t *testing.T) {
 		for _, entry := range streams {
 			require.NoError(t, builder.Append(entry))
 		}
-		require.NoError(t, builder.Flush(context.Background()))
+		_, err = builder.Flush(context.Background())
+		require.NoError(t, err)
 	})
 
 	t.Run("Read", func(t *testing.T) {
diff --git a/pkg/dataobj/consumer/config.go b/pkg/dataobj/consumer/config.go
new file mode 100644
index 0000000000000..c62ae612193cb
--- /dev/null
+++ b/pkg/dataobj/consumer/config.go
@@ -0,0 +1,26 @@
+package consumer
+
+import (
+	"flag"
+
+	"github.com/grafana/loki/v3/pkg/dataobj"
+)
+
+type Config struct {
+	dataobj.BuilderConfig
+	// StorageBucketPrefix is the prefix to use for the storage bucket.
+	StorageBucketPrefix string `yaml:"storage_bucket_prefix"`
+}
+
+func (cfg *Config) Validate() error {
+	return cfg.BuilderConfig.Validate()
+}
+
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.RegisterFlagsWithPrefix("dataobj-consumer.", f)
+}
+
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f)
+	f.StringVar(&cfg.StorageBucketPrefix, prefix+"storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.")
+}
diff --git a/pkg/dataobj/consumer/metrics.go b/pkg/dataobj/consumer/metrics.go
new file mode 100644
index 0000000000000..4525cb512de3b
--- /dev/null
+++ b/pkg/dataobj/consumer/metrics.go
@@ -0,0 +1,117 @@
+package consumer
+
+import (
+	"time"
+
+	"go.uber.org/atomic"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type partitionOffsetMetrics struct {
+	currentOffset prometheus.GaugeFunc
+	lastOffset    atomic.Int64
+
+	// Error counters
+	flushFailures  prometheus.Counter
+	commitFailures prometheus.Counter
+	appendFailures prometheus.Counter
+
+	// Processing delay histogram
+	processingDelay prometheus.Histogram
+}
+
+func newPartitionOffsetMetrics() *partitionOffsetMetrics {
+	p := &partitionOffsetMetrics{
+		flushFailures: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "loki_dataobj_consumer_flush_failures_total",
+			Help: "Total number of flush failures",
+		}),
+		commitFailures: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "loki_dataobj_consumer_commit_failures_total",
+			Help: "Total number of commit failures",
+		}),
+		appendFailures: prometheus.NewCounter(prometheus.CounterOpts{
+			Name: "loki_dataobj_consumer_append_failures_total",
+			Help: "Total number of append failures",
+		}),
+		processingDelay: prometheus.NewHistogram(prometheus.HistogramOpts{
+			Name:                            "loki_dataobj_consumer_processing_delay_seconds",
+			Help:                            "Time difference between record timestamp and processing time in seconds",
+			Buckets:                         prometheus.DefBuckets,
+			NativeHistogramBucketFactor:     1.1,
+			NativeHistogramMaxBucketNumber:  100,
+			NativeHistogramMinResetDuration: 0,
+		}),
+	}
+
+	p.currentOffset = prometheus.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Name: "loki_dataobj_consumer_current_offset",
+			Help: "The last consumed offset for this partition",
+		},
+		p.getCurrentOffset,
+	)
+
+	return p
+}
+
+func (p *partitionOffsetMetrics) getCurrentOffset() float64 {
+	return float64(p.lastOffset.Load())
+}
+
+func (p *partitionOffsetMetrics) register(reg prometheus.Registerer) error {
+	collectors := []prometheus.Collector{
+		p.currentOffset,
+		p.flushFailures,
+		p.commitFailures,
+		p.appendFailures,
+		p.processingDelay,
+	}
+
+	for _, collector := range collectors {
+		if err := reg.Register(collector); err != nil {
+			if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (p *partitionOffsetMetrics) unregister(reg prometheus.Registerer) {
+	collectors := []prometheus.Collector{
+		p.currentOffset,
+		p.flushFailures,
+		p.commitFailures,
+		p.appendFailures,
+		p.processingDelay,
+	}
+
+	for _, collector := range collectors {
+		reg.Unregister(collector)
+	}
+}
+
+func (p *partitionOffsetMetrics) updateOffset(offset int64) {
+	p.lastOffset.Store(offset)
+}
+
+func (p *partitionOffsetMetrics) incFlushFailures() {
+	p.flushFailures.Inc()
+}
+
+func (p *partitionOffsetMetrics) incCommitFailures() {
+	p.commitFailures.Inc()
+}
+
+func (p *partitionOffsetMetrics) incAppendFailures() {
+	p.appendFailures.Inc()
+}
+
+func (p *partitionOffsetMetrics) observeProcessingDelay(recordTimestamp time.Time) {
+	// Only observe the delay when the record carries a valid (non-zero) timestamp.
+	if !recordTimestamp.IsZero() {
+		p.processingDelay.Observe(time.Since(recordTimestamp).Seconds())
+	}
+}
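One detail of the metrics file worth noting: the current-offset gauge is a GaugeFunc backed by an atomic, so the per-record hot path is a plain atomic store and the value is only materialized at scrape time. A self-contained sketch of the same pattern (metric name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/atomic"
)

func main() {
	var last atomic.Int64

	// The gauge samples the atomic lazily; writers never touch the metric.
	g := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "example_current_offset",
		Help: "Last processed offset, sampled at scrape time.",
	}, func() float64 { return float64(last.Load()) })

	reg := prometheus.NewRegistry()
	reg.MustRegister(g)

	last.Store(42) // hot path: just an atomic store

	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetMetric()[0].GetGauge().GetValue()) // 42
}
```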
diff --git a/pkg/dataobj/consumer/partition_processor.go b/pkg/dataobj/consumer/partition_processor.go
new file mode 100644
index 0000000000000..0033e04c640f5
--- /dev/null
+++ b/pkg/dataobj/consumer/partition_processor.go
@@ -0,0 +1,222 @@
+package consumer
+
+import (
+	"bytes"
+	"context"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/backoff"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/thanos-io/objstore"
+	"github.com/twmb/franz-go/pkg/kgo"
+
+	"github.com/grafana/loki/v3/pkg/dataobj"
+	"github.com/grafana/loki/v3/pkg/dataobj/metastore"
+	"github.com/grafana/loki/v3/pkg/kafka"
+)
+
+type partitionProcessor struct {
+	// Kafka client and topic/partition info
+	client    *kgo.Client
+	topic     string
+	partition int32
+	tenantID  []byte
+	// Processing pipeline
+	records chan *kgo.Record
+	builder *dataobj.Builder
+	decoder *kafka.Decoder
+
+	// Builder initialization
+	builderOnce      sync.Once
+	builderCfg       dataobj.BuilderConfig
+	bucket           objstore.Bucket
+	metastoreManager *metastore.Manager
+	// Metrics
+	metrics *partitionOffsetMetrics
+
+	// Control and coordination
+	ctx    context.Context
+	cancel context.CancelFunc
+	wg     sync.WaitGroup
+	reg    prometheus.Registerer
+	logger log.Logger
+}
+
+func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg dataobj.BuilderConfig, bucket objstore.Bucket, tenantID string, virtualShard int32, topic string, partition int32, logger log.Logger, reg prometheus.Registerer) *partitionProcessor {
+	ctx, cancel := context.WithCancel(ctx)
+	decoder, err := kafka.NewDecoder()
+	if err != nil {
+		panic(err)
+	}
+	reg = prometheus.WrapRegistererWith(prometheus.Labels{
+		"shard":     strconv.Itoa(int(virtualShard)),
+		"partition": strconv.Itoa(int(partition)),
+		"topic":     topic,
+	}, reg)
+
+	metrics := newPartitionOffsetMetrics()
+	if err := metrics.register(reg); err != nil {
+		level.Error(logger).Log("msg", "failed to register partition metrics", "err", err)
+	}
+
+	metastoreManager, err := metastore.NewMetastoreManager(bucket, tenantID, logger, reg)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to create metastore manager", "err", err)
+		cancel()
+		return nil
+	}
+
+	return &partitionProcessor{
+		client:           client,
+		logger:           log.With(logger, "topic", topic, "partition", partition, "tenant", tenantID),
+		topic:            topic,
+		partition:        partition,
+		records:          make(chan *kgo.Record, 1000),
+		ctx:              ctx,
+		cancel:           cancel,
+		decoder:          decoder,
+		reg:              reg,
+		builderCfg:       builderCfg,
+		bucket:           bucket,
+		tenantID:         []byte(tenantID),
+		metrics:          metrics,
+		metastoreManager: metastoreManager,
+	}
+}
+
+func (p *partitionProcessor) start() {
+	p.wg.Add(1)
+	go func() {
+		defer p.wg.Done()
+
+		level.Info(p.logger).Log("msg", "started partition processor")
+		for {
+			select {
+			case <-p.ctx.Done():
+				level.Info(p.logger).Log("msg", "stopping partition processor")
+				return
+			case record, ok := <-p.records:
+				if !ok {
+					// Channel was closed
+					return
+				}
+				p.processRecord(record)
+			}
+		}
+	}()
+}
+
+func (p *partitionProcessor) stop() {
+	p.cancel()
+	p.wg.Wait()
+	if p.builder != nil {
+		p.builder.UnregisterMetrics(p.reg)
+	}
+	p.metrics.unregister(p.reg)
+}
+
+// Drops records from the channel if the processor is stopped.
+// Returns false if the processor is stopped, true otherwise.
+func (p *partitionProcessor) Append(records []*kgo.Record) bool {
+	for _, record := range records {
+		select {
+		// must check per-record in order to not block on a full channel
+		// after the receiver has been stopped.
+		case <-p.ctx.Done():
+			return false
+		case p.records <- record:
+		}
+	}
+	return true
+}
+
+func (p *partitionProcessor) initBuilder() error {
+	var initErr error
+	p.builderOnce.Do(func() {
+		builder, err := dataobj.NewBuilder(p.builderCfg, p.bucket, string(p.tenantID))
+		if err != nil {
+			initErr = err
+			return
+		}
+		if err := builder.RegisterMetrics(p.reg); err != nil {
+			initErr = err
+			return
+		}
+		p.builder = builder
+	})
+	return initErr
+}
+
+func (p *partitionProcessor) processRecord(record *kgo.Record) {
+	// Update the offset metric at the end of processing
+	defer p.metrics.updateOffset(record.Offset)
+
+	// Observe processing delay
+	p.metrics.observeProcessingDelay(record.Timestamp)
+
+	// Initialize the builder if this is the first record
+	if err := p.initBuilder(); err != nil {
+		level.Error(p.logger).Log("msg", "failed to initialize builder", "err", err)
+		return
+	}
+
+	// todo: handle multi-tenant
+	if !bytes.Equal(record.Key, p.tenantID) {
+		level.Error(p.logger).Log("msg", "record key does not match tenant ID", "key", record.Key, "tenant_id", p.tenantID)
+		return
+	}
+	stream, err := p.decoder.DecodeWithoutLabels(record.Value)
+	if err != nil {
+		level.Error(p.logger).Log("msg", "failed to decode record", "err", err)
+		return
+	}
+
+	if err := p.builder.Append(stream); err != nil {
+		if err != dataobj.ErrBufferFull {
+			level.Error(p.logger).Log("msg", "failed to append stream", "err", err)
+			p.metrics.incAppendFailures()
+			return
+		}
+
+		backoff := backoff.New(p.ctx, backoff.Config{
+			MinBackoff: 100 * time.Millisecond,
+			MaxBackoff: 10 * time.Second,
+		})
+
+		var flushResult dataobj.FlushResult
+		for backoff.Ongoing() {
+			flushResult, err = p.builder.Flush(p.ctx)
+			if err == nil {
+				break
+			}
+			level.Error(p.logger).Log("msg", "failed to flush builder", "err", err)
+			p.metrics.incFlushFailures()
+			backoff.Wait()
+		}
+
+		if err := p.metastoreManager.UpdateMetastore(p.ctx, flushResult); err != nil {
+			level.Error(p.logger).Log("msg", "failed to update metastore", "err", err)
+			return
+		}
+
+		backoff.Reset()
+		for backoff.Ongoing() {
+			err = p.client.CommitRecords(p.ctx, record)
+			if err == nil {
+				break
+			}
+			level.Error(p.logger).Log("msg", "failed to commit records", "err", err)
+			p.metrics.incCommitFailures()
+			backoff.Wait()
+		}
+
+		if err := p.builder.Append(stream); err != nil {
+			level.Error(p.logger).Log("msg", "failed to append stream after flushing", "err", err)
+			p.metrics.incAppendFailures()
+		}
+	}
+}
diff --git a/pkg/dataobj/consumer/service.go b/pkg/dataobj/consumer/service.go
new file mode 100644
index 0000000000000..aee69dbd0dfd3
--- /dev/null
+++ b/pkg/dataobj/consumer/service.go
@@ -0,0 +1,221 @@
+package consumer
+
+import (
+	"context"
+	"errors"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/ring"
+	"github.com/grafana/dskit/services"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/thanos-io/objstore"
+	"github.com/twmb/franz-go/pkg/kgo"
+
+	"github.com/grafana/loki/v3/pkg/distributor"
+	"github.com/grafana/loki/v3/pkg/kafka"
+	"github.com/grafana/loki/v3/pkg/kafka/client"
+	"github.com/grafana/loki/v3/pkg/kafka/partitionring/consumer"
+)
+
+const (
+	groupName = "dataobj-consumer"
+)
+
+type Service struct {
+	services.Service
+
+	logger log.Logger
+	reg    prometheus.Registerer
+	client *consumer.Client
+
+	cfg    Config
+	bucket objstore.Bucket
+	codec  distributor.TenantPrefixCodec
+
+	// Partition management
+	partitionMtx      sync.RWMutex
+	partitionHandlers map[string]map[int32]*partitionProcessor
+}
+
+func New(kafkaCfg kafka.Config, cfg Config, topicPrefix string, bucket objstore.Bucket, instanceID string, partitionRing ring.PartitionRingReader, reg prometheus.Registerer, logger log.Logger) *Service {
+	if cfg.StorageBucketPrefix != "" {
+		bucket = objstore.NewPrefixedBucket(bucket, cfg.StorageBucketPrefix)
+	}
+	s := &Service{
+		logger:            log.With(logger, "component", groupName),
+		cfg:               cfg,
+		bucket:            bucket,
+		codec:             distributor.TenantPrefixCodec(topicPrefix),
+		partitionHandlers: make(map[string]map[int32]*partitionProcessor),
+		reg:               reg,
+	}
+
+	client, err := consumer.NewGroupClient(
+		kafkaCfg,
+		partitionRing,
+		groupName,
+		client.NewReaderClientMetrics(groupName, reg),
+		logger,
+		kgo.InstanceID(instanceID),
+		kgo.SessionTimeout(3*time.Minute),
+		kgo.RebalanceTimeout(5*time.Minute),
+		kgo.OnPartitionsAssigned(s.handlePartitionsAssigned),
+		kgo.OnPartitionsRevoked(func(_ context.Context, _ *kgo.Client, m map[string][]int32) {
+			s.handlePartitionsRevoked(m)
+		}),
+	)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to create consumer", "err", err)
+		return nil
+	}
+	s.client = client
+	s.Service = services.NewBasicService(nil, s.run, s.stopping)
+	return s
+}
+
+func (s *Service) handlePartitionsAssigned(ctx context.Context, client *kgo.Client, partitions map[string][]int32) {
+	level.Info(s.logger).Log("msg", "partitions assigned", "partitions", formatPartitionsMap(partitions))
+	s.partitionMtx.Lock()
+	defer s.partitionMtx.Unlock()
+
+	for topic, parts := range partitions {
+		tenant, virtualShard, err := s.codec.Decode(topic)
+		// TODO: should propagate this error more effectively
+		if err != nil {
+			level.Error(s.logger).Log("msg", "failed to decode topic", "topic", topic, "err", err)
+			continue
+		}
+
+		if _, ok := s.partitionHandlers[topic]; !ok {
+			s.partitionHandlers[topic] = make(map[int32]*partitionProcessor)
+		}
+
+		for _, partition := range parts {
+			processor := newPartitionProcessor(ctx, client, s.cfg.BuilderConfig, s.bucket, tenant, virtualShard, topic, partition, s.logger, s.reg)
+			s.partitionHandlers[topic][partition] = processor
+			processor.start()
+		}
+	}
+}
+
+func (s *Service) handlePartitionsRevoked(partitions map[string][]int32) {
+	level.Info(s.logger).Log("msg", "partitions revoked", "partitions", formatPartitionsMap(partitions))
+	s.partitionMtx.Lock()
+	defer s.partitionMtx.Unlock()
+
+	var wg sync.WaitGroup
+	for topic, parts := range partitions {
+		if handlers, ok := s.partitionHandlers[topic]; ok {
+			for _, partition := range parts {
+				if processor, exists := handlers[partition]; exists {
+					wg.Add(1)
+					go func(p *partitionProcessor) {
+						defer wg.Done()
+						p.stop()
+					}(processor)
+					delete(handlers, partition)
+				}
+			}
+			if len(handlers) == 0 {
+				delete(s.partitionHandlers, topic)
+			}
+		}
+	}
+	wg.Wait()
+}
+
+func (s *Service) run(ctx context.Context) error {
+	for {
+		fetches := s.client.PollRecords(ctx, -1)
+		if fetches.IsClientClosed() || ctx.Err() != nil {
+			return nil
+		}
+		if errs := fetches.Errors(); len(errs) > 0 {
+			var multiErr error
+			for _, err := range errs {
+				multiErr = errors.Join(multiErr, err.Err)
+			}
+			level.Error(s.logger).Log("msg", "error fetching records", "err", multiErr.Error())
+			continue
+		}
+		if fetches.Empty() {
+			continue
+		}
+
+		fetches.EachPartition(func(ftp kgo.FetchTopicPartition) {
+			s.partitionMtx.RLock()
+			handlers, ok := s.partitionHandlers[ftp.Topic]
+			if !ok {
+				s.partitionMtx.RUnlock()
+				return
+			}
+			processor, ok := handlers[ftp.Partition]
+			s.partitionMtx.RUnlock()
+			if !ok {
+				return
+			}
+
+			// Collect all records for this partition
+			records := ftp.Records
+			if len(records) == 0 {
+				return
+			}
+
+			_ = processor.Append(records)
+		})
+	}
+}
+
+func (s *Service) stopping(failureCase error) error {
+	s.partitionMtx.Lock()
+	defer s.partitionMtx.Unlock()
+
+	var wg sync.WaitGroup
+	for _, handlers := range s.partitionHandlers {
+		for _, processor := range handlers {
+			wg.Add(1)
+			go func(p *partitionProcessor) {
+				defer wg.Done()
+				p.stop()
+			}(processor)
+		}
+	}
+	wg.Wait()
+	// Only close the client once all partitions have been stopped.
+	// This ensures that all records have been processed and their offsets
+	// committed before closing.
+ s.client.Close() + level.Info(s.logger).Log("msg", "consumer stopped") + return failureCase +} + +// Helper function to format []int32 slice +func formatInt32Slice(slice []int32) string { + if len(slice) == 0 { + return "[]" + } + result := "[" + for i, v := range slice { + if i > 0 { + result += "," + } + result += strconv.Itoa(int(v)) + } + result += "]" + return result +} + +// Helper function to format map[string][]int32 into a readable string +func formatPartitionsMap(partitions map[string][]int32) string { + var result string + for topic, parts := range partitions { + if len(result) > 0 { + result += ", " + } + result += topic + "=" + formatInt32Slice(parts) + } + return result +} diff --git a/pkg/dataobj/explorer/ui/package-lock.json b/pkg/dataobj/explorer/ui/package-lock.json index 9c72f7908a78d..37f8b3267a699 100644 --- a/pkg/dataobj/explorer/ui/package-lock.json +++ b/pkg/dataobj/explorer/ui/package-lock.json @@ -20,11 +20,24 @@ "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.16", "postcss": "^8.4.32", - "tailwindcss": "^4.0.0", + "tailwindcss": "^3.4.0", "typescript": "^5.2.2", "vite": "^6.0.0" } }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@ampproject/remapping": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", @@ -736,6 +749,24 @@ "node": ">=18" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.8", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", @@ -789,6 +820,55 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.31.0", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.31.0.tgz", @@ -1114,9 +1194,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.12.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.12.0.tgz", - "integrity": "sha512-Fll2FZ1riMjNmlmJOdAyY5pUbkftXslB5DgEzlIuNaiWhXd00FhWxVC/r4yV/4wBb9JfImTu+jiSvXTkJ7F/gA==", + "version": "22.13.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz", + "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==", "dev": true, "license": "MIT", "dependencies": { @@ -1163,6 +1243,60 @@ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, + "node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, "node_modules/autoprefixer": { "version": "10.4.20", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", @@ -1201,6 +1335,49 @@ "postcss": "^8.1.0" } }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + 
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/browserslist": { "version": "4.24.4", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", @@ -1234,6 +1411,16 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/caniuse-lite": { "version": "1.0.30001695", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001695.tgz", @@ -1255,6 +1442,74 @@ ], "license": "CC-BY-4.0" }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/commander": { + 
"version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", @@ -1271,6 +1526,34 @@ "node": ">=18" } }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -1306,6 +1589,27 @@ } } }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, "node_modules/electron-to-chromium": { "version": "1.5.83", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.83.tgz", @@ -1313,6 +1617,13 @@ "dev": true, "license": "ISC" }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, "node_modules/esbuild": { "version": "0.24.2", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.2.tgz", @@ -1364,6 +1675,76 @@ "node": ">=6" } }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + 
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.19.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.0.tgz", + "integrity": "sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/fraction.js": { "version": "4.3.7", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", @@ -1393,6 +1774,16 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -1403,6 +1794,40 @@ "node": ">=6.9.0" } }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, "node_modules/globals": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", @@ -1413,61 +1838,237 @@ "node": ">=4" } }, - "node_modules/jiti": { - "version": "1.21.7", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", - "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "node_modules/hasown": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, "license": "MIT", - "optional": true, - "peer": true, - "bin": { - "jiti": "bin/jiti.js" + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" } }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } }, - "node_modules/jsesc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dev": true, "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" + "dependencies": { + "hasown": "^2.0.2" }, "engines": { - "node": ">=6" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, "license": "MIT", - "bin": { - "json5": "lib/cli.js" - }, "engines": { - "node": ">=6" + "node": ">=0.10.0" } }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, "license": "ISC", "dependencies": { "yallist": "^3.0.2" } }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -1475,6 +2076,18 @@ "dev": true, "license": "MIT" }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, "node_modules/nanoid": { "version": "3.3.8", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", @@ -1501,6 +2114,16 @@ "dev": true, "license": "MIT" }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/normalize-range": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", @@ -1511,6 +2134,74 @@ "node": ">=0.10.0" } }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -1518,6 +2209,39 @@ "dev": true, "license": "ISC" }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/postcss": { "version": "8.5.1", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz", @@ -1547,6 +2271,120 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": 
"4.0.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", + "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.0.0", + "yaml": "^2.3.4" + }, + "engines": { + "node": ">= 14" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/postcss-value-parser": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", @@ -1554,6 +2392,27 @@ "dev": true, "license": "MIT" }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/react": { "version": "19.0.0", "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", @@ -1586,9 +2445,9 @@ } }, "node_modules/react-router": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.1.4.tgz", - "integrity": "sha512-aJWVrKoLI0nIK1lfbTU3d5al1ZEUiwtSus/xjYL8K5sv2hyPesiOIojHM7QnaNLVtroOB1McZsWk37fMQVoc6A==", + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.1.5.tgz", + "integrity": "sha512-8BUF+hZEU4/z/JD201yK6S+UYhsf58bzYIDq2NS1iGpwxSXDu7F+DeGSkIXMFBuHZB21FSiCzEcUb18cQNdRkA==", "license": "MIT", "dependencies": { "@types/cookie": "^0.6.0", @@ -1610,12 +2469,12 @@ } }, "node_modules/react-router-dom": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.1.4.tgz", - "integrity": "sha512-p474cAeRKfPNp+9QtpdVEa025iWLIIIBhYCnjsSwFmZH3c5DBHOc7vB7zmL6lL747o0ArfrLblNTebtL6lt0lA==", + "version": "7.1.5", + "resolved": 
"https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.1.5.tgz", + "integrity": "sha512-/4f9+up0Qv92D3bB8iN5P1s3oHAepSGa9h5k6tpTFlixTTskJZwKGhJ6vRJ277tLD1zuaZTt95hyGWV1Z37csQ==", "license": "MIT", "dependencies": { - "react-router": "7.1.4" + "react-router": "7.1.5" }, "engines": { "node": ">=20.0.0" @@ -1625,6 +2484,61 @@ "react-dom": ">=18" } }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, "node_modules/rollup": { "version": "4.31.0", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.31.0.tgz", @@ -1664,6 +2578,30 @@ "fsevents": "~2.3.2" } }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, "node_modules/scheduler": { "version": "0.25.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", @@ -1686,6 +2624,42 @@ "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", "license": "MIT" }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -1696,13 +2670,227 @@ "node": ">=0.10.0" } }, - "node_modules/tailwindcss": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.0.1.tgz", - "integrity": "sha512-UK5Biiit/e+r3i0O223bisoS5+y7ZT1PM8Ojn0MxRHzXN1VPZ2KY6Lo6fhu1dOfCfyUAlK7Lt6wSxowRabATBw==", + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true, "license": "MIT" }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": 
"6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.17", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.17.tgz", + "integrity": "sha512-w33E2aCvSDP0tW9RZuNXadXlkHXqFzSkQew/aIa2i/Sj8fThxwovwlXHSPXTbAHwEIhBFXAedUhP2tueAKP8Og==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.6", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, "node_modules/turbo-stream": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", @@ -1761,6 +2949,13 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, "node_modules/vite": { "version": "6.0.11", "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.11.tgz", @@ -1833,6 +3028,120 @@ } } }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + 
"node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", @@ -1846,8 +3155,6 @@ "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", "dev": true, "license": "ISC", - "optional": true, - "peer": true, "bin": { "yaml": "bin.mjs" }, diff --git a/pkg/dataobj/explorer/ui/package.json b/pkg/dataobj/explorer/ui/package.json index 97f030622c5a3..6d395838764e9 100644 --- a/pkg/dataobj/explorer/ui/package.json +++ b/pkg/dataobj/explorer/ui/package.json @@ -21,7 +21,7 @@ "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.16", "postcss": "^8.4.32", - "tailwindcss": "^4.0.0", + "tailwindcss": "^3.4.0", "typescript": "^5.2.2", "vite": "^6.0.0" } diff --git a/pkg/dataobj/internal/sections/logs/table.go b/pkg/dataobj/internal/sections/logs/table.go index aea082be064c2..82658e967fa40 100644 --- a/pkg/dataobj/internal/sections/logs/table.go +++ b/pkg/dataobj/internal/sections/logs/table.go @@ -80,7 +80,6 @@ func (t *table) ReadPages(ctx context.Context, pages []dataset.Page) result.Seq[ return nil }) - } // Size returns the total size of the table in bytes. diff --git a/pkg/dataobj/internal/sections/streams/streams.go b/pkg/dataobj/internal/sections/streams/streams.go index 518807e2104ec..138de989cc3cd 100644 --- a/pkg/dataobj/internal/sections/streams/streams.go +++ b/pkg/dataobj/internal/sections/streams/streams.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "sort" + "sync" "time" "github.com/prometheus/client_golang/prometheus" @@ -16,6 +17,7 @@ import ( "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding" "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/datasetmd" "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/streamsmd" + "github.com/grafana/loki/v3/pkg/dataobj/internal/result" "github.com/grafana/loki/v3/pkg/dataobj/internal/streamio" "github.com/grafana/loki/v3/pkg/dataobj/internal/util/sliceclear" ) @@ -33,6 +35,20 @@ type Stream struct { Rows int // Number of rows in the stream. } +func (s *Stream) Reset() { + s.ID = 0 + s.Labels = nil + s.MinTimestamp = time.Time{} + s.MaxTimestamp = time.Time{} + s.Rows = 0 +} + +var streamPool = sync.Pool{ + New: func() interface{} { + return &Stream{} + }, +} + // Streams tracks information about streams in a data object. 
type Streams struct { metrics *Metrics @@ -61,10 +77,26 @@ func New(metrics *Metrics, pageSize int) *Streams { return &Streams{ metrics: metrics, pageSize: pageSize, - lookup: make(map[uint64][]*Stream), + lookup: make(map[uint64][]*Stream, 1024), + ordered: make([]*Stream, 0, 1024), } } +func (s *Streams) Iter() result.Seq[Stream] { + return result.Iter(func(yield func(Stream) bool) error { + for _, stream := range s.ordered { + if !yield(*stream) { + return nil + } + } + return nil + }) +} + +func (s *Streams) GetBounds() (time.Time, time.Time) { + return s.globalMinTimestamp, s.globalMaxTimestamp +} + // Record a stream record within the Streams section. The provided timestamp is // used to track the minimum and maximum timestamp of a stream. The number of // calls to Record is used to track the number of rows for a stream. @@ -153,7 +185,11 @@ func (s *Streams) addStream(hash uint64, streamLabels labels.Labels) *Stream { s.currentLabelsSize += len(lbl.Value) } - newStream := &Stream{ID: s.lastID.Add(1), Labels: streamLabels} + newStream := streamPool.Get().(*Stream) + newStream.Reset() + newStream.ID = s.lastID.Add(1) + newStream.Labels = streamLabels + s.lookup[hash] = append(s.lookup[hash], newStream) s.ordered = append(s.ordered, newStream) s.metrics.streamCount.Inc() @@ -187,7 +223,6 @@ func (s *Streams) StreamID(streamLabels labels.Labels) int64 { func (s *Streams) EncodeTo(enc *encoding.Encoder) error { timer := prometheus.NewTimer(s.metrics.encodeSeconds) defer timer.ObserveDuration() - defer s.Reset() // TODO(rfratto): handle one section becoming too large. This can happen when // the number of columns is very wide. There are two approaches to handle @@ -335,6 +370,9 @@ func encodeColumn(enc *encoding.StreamsEncoder, columnType streamsmd.ColumnType, // Reset resets all state, allowing Streams to be reused. func (s *Streams) Reset() { s.lastID.Store(0) + for _, stream := range s.ordered { + streamPool.Put(stream) + } clear(s.lookup) s.ordered = sliceclear.Clear(s.ordered) s.currentLabelsSize = 0 diff --git a/pkg/dataobj/metastore/metastore.go b/pkg/dataobj/metastore/metastore.go new file mode 100644 index 0000000000000..08c0b00364c9a --- /dev/null +++ b/pkg/dataobj/metastore/metastore.go @@ -0,0 +1,175 @@ +package metastore + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + + "github.com/grafana/loki/v3/pkg/dataobj" + "github.com/grafana/loki/v3/pkg/logproto" +) + +const ( + metastoreWindowSize = 12 * time.Hour +) + +var ( + // Define our own builder config because metastore objects are significantly smaller. 
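+	// The values below keep the same shape at a smaller scale: BufferSize is
+	// 8x the target page size, and TargetSectionSize is 1/8th of the target
+	// object size.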
+ metastoreBuilderCfg = dataobj.BuilderConfig{ + SHAPrefixSize: 2, + TargetObjectSize: 32 * 1024 * 1024, + TargetPageSize: 4 * 1024 * 1024, + BufferSize: 32 * 1024 * 1024, // 8x page size + TargetSectionSize: 4 * 1024 * 1024, // object size / 8 + } +) + +type Manager struct { + metastoreBuilder *dataobj.Builder + tenantID string + metrics *metastoreMetrics + bucket objstore.Bucket + logger log.Logger + backoff *backoff.Backoff + + builderOnce sync.Once +} + +func NewMetastoreManager(bucket objstore.Bucket, tenantID string, logger log.Logger, reg prometheus.Registerer) (*Manager, error) { + metrics := newMetastoreMetrics() + if err := metrics.register(reg); err != nil { + return nil, err + } + + return &Manager{ + bucket: bucket, + metrics: metrics, + logger: logger, + tenantID: tenantID, + backoff: backoff.New(context.TODO(), backoff.Config{ + MinBackoff: 50 * time.Millisecond, + MaxBackoff: 10 * time.Second, + }), + builderOnce: sync.Once{}, + }, nil +} + +func (m *Manager) initBuilder() error { + var initErr error + m.builderOnce.Do(func() { + metastoreBuilder, err := dataobj.NewBuilder(metastoreBuilderCfg, m.bucket, m.tenantID) + if err != nil { + initErr = err + return + } + m.metastoreBuilder = metastoreBuilder + }) + return initErr +} + +func (m *Manager) UpdateMetastore(ctx context.Context, flushResult dataobj.FlushResult) error { + var err error + start := time.Now() + defer m.metrics.observeMetastoreProcessing(start) + + // Initialize builder if this is the first call for this partition + if err := m.initBuilder(); err != nil { + return err + } + + minTimestamp, maxTimestamp := flushResult.MinTimestamp, flushResult.MaxTimestamp + + // Work our way through the metastore objects window by window, updating & creating them as needed. + // Each one handles its own retries in order to keep making progress in the event of a failure. 
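+	// For example, with the 12h metastoreWindowSize a flush spanning
+	// 03:00-15:00 UTC truncates to the 00:00 and 12:00 windows, so the loop
+	// below reads, updates, and rewrites two metastore objects.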
+ minMetastoreWindow := minTimestamp.Truncate(metastoreWindowSize) + maxMetastoreWindow := maxTimestamp.Truncate(metastoreWindowSize) + for metastoreWindow := minMetastoreWindow; metastoreWindow.Compare(maxMetastoreWindow) <= 0; metastoreWindow = metastoreWindow.Add(metastoreWindowSize) { + metastorePath := fmt.Sprintf("tenant-%s/metastore/%s.store", m.tenantID, metastoreWindow.Format(time.RFC3339)) + m.backoff.Reset() + for m.backoff.Ongoing() { + err = m.bucket.GetAndReplace(ctx, metastorePath, func(existing io.Reader) (io.Reader, error) { + buf, err := io.ReadAll(existing) + if err != nil { + return nil, err + } + + m.metastoreBuilder.Reset() + + if len(buf) > 0 { + replayStart := time.Now() + object := dataobj.FromReaderAt(bytes.NewReader(buf), int64(len(buf))) + if err := m.readFromExisting(ctx, object); err != nil { + return nil, err + } + m.metrics.observeMetastoreReplay(replayStart) + } + + encodingStart := time.Now() + + ls := fmt.Sprintf("{__start__=\"%d\", __end__=\"%d\", __path__=\"%s\"}", minTimestamp.UnixNano(), maxTimestamp.UnixNano(), flushResult.Path) + err = m.metastoreBuilder.Append(logproto.Stream{ + Labels: ls, + Entries: []logproto.Entry{{Line: ""}}, + }) + if err != nil { + return nil, err + } + + newMetastore, err := m.metastoreBuilder.FlushToBuffer() + if err != nil { + return nil, err + } + m.metrics.observeMetastoreEncoding(encodingStart) + return newMetastore, nil + }) + if err == nil { + level.Info(m.logger).Log("msg", "successfully merged & updated metastore", "metastore", metastorePath) + break + } + level.Error(m.logger).Log("msg", "failed to get and replace metastore object", "err", err, "metastore", metastorePath) + m.metrics.incMetastoreWriteFailures() + m.backoff.Wait() + } + // Reset at the end too so we don't leave our memory hanging around between calls. 
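+		// (The builder is also Reset at the start of every GetAndReplace
+		// attempt above; this call only ensures nothing is still held once
+		// the window has been processed.)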
+ m.metastoreBuilder.Reset() + } + return err +} + +func (m *Manager) readFromExisting(ctx context.Context, object *dataobj.Object) error { + // Fetch sections + si, err := object.Metadata(ctx) + if err != nil { + return err + } + + // Read streams from existing metastore object and write them to the builder for the new object + streams := make([]dataobj.Stream, 100) + for i := 0; i < si.StreamsSections; i++ { + streamsReader := dataobj.NewStreamsReader(object, i) + for n, err := streamsReader.Read(ctx, streams); n > 0; n, err = streamsReader.Read(ctx, streams) { + if err != nil && err != io.EOF { + return err + } + for _, stream := range streams[:n] { + err = m.metastoreBuilder.Append(logproto.Stream{ + Labels: stream.Labels.String(), + Entries: []logproto.Entry{{Line: ""}}, + }) + if err != nil { + return err + } + } + } + } + return nil +} diff --git a/pkg/dataobj/metastore/metastore_test.go b/pkg/dataobj/metastore/metastore_test.go new file mode 100644 index 0000000000000..582882917b5e9 --- /dev/null +++ b/pkg/dataobj/metastore/metastore_test.go @@ -0,0 +1,106 @@ +package metastore + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/grafana/dskit/backoff" + "github.com/thanos-io/objstore" + + "github.com/grafana/loki/v3/pkg/dataobj" +) + +func BenchmarkWriteMetastores(t *testing.B) { + ctx := context.Background() + bucket := objstore.NewInMemBucket() + tenantID := "test-tenant" + + m, err := NewMetastoreManager(bucket, tenantID, log.NewNopLogger(), prometheus.DefaultRegisterer) + require.NoError(t, err) + + // Set limits for the test + m.backoff = backoff.New(context.TODO(), backoff.Config{ + MinBackoff: 10 * time.Millisecond, + MaxBackoff: 100 * time.Millisecond, + MaxRetries: 3, + }) + + // Add test data spanning multiple metastore windows + now := time.Date(2025, 1, 1, 15, 0, 0, 0, time.UTC) + + flushResults := make([]dataobj.FlushResult, 1000) + for i := 0; i < 1000; i++ { + flushResults[i] = dataobj.FlushResult{ + Path: fmt.Sprintf("test-dataobj-path-%d", i), + MinTimestamp: now.Add(-1 * time.Hour).Add(time.Duration(i) * time.Millisecond), + MaxTimestamp: now, + } + } + + t.ResetTimer() + t.ReportAllocs() + for i := 0; i < t.N; i++ { + // Test writing metastores + err = m.UpdateMetastore(ctx, flushResults[i%len(flushResults)]) + require.NoError(t, err) + } + + require.Len(t, bucket.Objects(), 1) +} + +func TestWriteMetastores(t *testing.T) { + ctx := context.Background() + bucket := objstore.NewInMemBucket() + tenantID := "test-tenant" + + m, err := NewMetastoreManager(bucket, tenantID, log.NewNopLogger(), prometheus.DefaultRegisterer) + require.NoError(t, err) + + // Set limits for the test + m.backoff = backoff.New(context.TODO(), backoff.Config{ + MinBackoff: 10 * time.Millisecond, + MaxBackoff: 100 * time.Millisecond, + MaxRetries: 3, + }) + + // Add test data spanning multiple metastore windows + now := time.Date(2025, 1, 1, 15, 0, 0, 0, time.UTC) + + flushResult := dataobj.FlushResult{ + Path: "test-dataobj-path", + MinTimestamp: now.Add(-1 * time.Hour), + MaxTimestamp: now, + } + + require.Len(t, bucket.Objects(), 0) + + // Test writing metastores + err = m.UpdateMetastore(ctx, flushResult) + require.NoError(t, err) + + require.Len(t, bucket.Objects(), 1) + var originalSize int + for _, obj := range bucket.Objects() { + originalSize = len(obj) + } + + flushResult2 := dataobj.FlushResult{ + Path: "different-test-dataobj-path", + 
MinTimestamp: now.Add(-15 * time.Minute), + MaxTimestamp: now, + } + + err = m.UpdateMetastore(ctx, flushResult2) + require.NoError(t, err) + + require.Len(t, bucket.Objects(), 1) + for _, obj := range bucket.Objects() { + require.Greater(t, len(obj), originalSize) + } +} diff --git a/pkg/dataobj/metastore/metrics.go b/pkg/dataobj/metastore/metrics.go new file mode 100644 index 0000000000000..424f1e27cccee --- /dev/null +++ b/pkg/dataobj/metastore/metrics.go @@ -0,0 +1,102 @@ +package metastore + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type metastoreMetrics struct { + metastoreProcessingTime prometheus.Histogram + metastoreReplayTime prometheus.Histogram + metastoreEncodingTime prometheus.Histogram + metastoreWriteFailures prometheus.Counter +} + +func newMetastoreMetrics() *metastoreMetrics { + metrics := &metastoreMetrics{ + metastoreReplayTime: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "loki_dataobj_consumer_metastore_replay_seconds", + Help: "Time taken to replay existing metastore data into the in-memory builder in seconds", + Buckets: prometheus.DefBuckets, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 0, + }), + metastoreEncodingTime: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "loki_dataobj_consumer_metastore_encoding_seconds", + Help: "Time taken to add the new metadata & encode the new metastore data object in seconds", + Buckets: prometheus.DefBuckets, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 0, + }), + metastoreProcessingTime: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "loki_dataobj_consumer_metastore_processing_seconds", + Help: "Total time taken to update all metastores for a flushed dataobj in seconds", + Buckets: prometheus.DefBuckets, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 0, + }), + metastoreWriteFailures: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "loki_dataobj_consumer_metastore_write_failures_total", + Help: "Total number of metastore write failures", + }), + } + + return metrics +} + +func (p *metastoreMetrics) register(reg prometheus.Registerer) error { + collectors := []prometheus.Collector{ + p.metastoreReplayTime, + p.metastoreEncodingTime, + p.metastoreProcessingTime, + p.metastoreWriteFailures, + } + + for _, collector := range collectors { + if err := reg.Register(collector); err != nil { + if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + return err + } + } + } + return nil +} + +func (p *metastoreMetrics) unregister(reg prometheus.Registerer) { + collectors := []prometheus.Collector{ + p.metastoreReplayTime, + p.metastoreEncodingTime, + p.metastoreProcessingTime, + p.metastoreWriteFailures, + } + + for _, collector := range collectors { + reg.Unregister(collector) + } +} + +func (p *metastoreMetrics) incMetastoreWriteFailures() { + p.metastoreWriteFailures.Inc() +} + +func (p *metastoreMetrics) observeMetastoreReplay(recordTimestamp time.Time) { + if !recordTimestamp.IsZero() { // Only observe if timestamp is valid + p.metastoreReplayTime.Observe(time.Since(recordTimestamp).Seconds()) + } +} + +func (p *metastoreMetrics) observeMetastoreEncoding(recordTimestamp time.Time) { + if !recordTimestamp.IsZero() { // Only observe if timestamp is valid + p.metastoreEncodingTime.Observe(time.Since(recordTimestamp).Seconds()) + } +} + +func (p *metastoreMetrics) 
observeMetastoreProcessing(recordTimestamp time.Time) { + if !recordTimestamp.IsZero() { // Only observe if timestamp is valid + p.metastoreProcessingTime.Observe(time.Since(recordTimestamp).Seconds()) + } +} diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 7337ce16209c4..1b0cee2a9c62a 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -1,6 +1,7 @@ package distributor import ( + "errors" "fmt" "net/http" "strings" @@ -42,16 +43,26 @@ func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRe logPushRequestStreams := d.tenantConfigs.LogPushRequestStreams(tenantID) req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser, d.usageTracker, logPushRequestStreams) if err != nil { + if !errors.Is(err, push.ErrAllLogsFiltered) { + if d.tenantConfigs.LogPushRequest(tenantID) { + level.Debug(logger).Log( + "msg", "push request failed", + "code", http.StatusBadRequest, + "err", err, + ) + } + d.writeFailuresManager.Log(tenantID, fmt.Errorf("couldn't parse push request: %w", err)) + + errorWriter(w, err.Error(), http.StatusBadRequest, logger) + return + } + if d.tenantConfigs.LogPushRequest(tenantID) { level.Debug(logger).Log( - "msg", "push request failed", - "code", http.StatusBadRequest, - "err", err, + "msg", "successful push request filtered all lines", ) } - d.writeFailuresManager.Log(tenantID, fmt.Errorf("couldn't parse push request: %w", err)) - - errorWriter(w, err.Error(), http.StatusBadRequest, logger) + w.WriteHeader(http.StatusNoContent) return } diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go index 8da8fc608fa98..7e1ee788994c4 100644 --- a/pkg/distributor/http_test.go +++ b/pkg/distributor/http_test.go @@ -63,27 +63,66 @@ func TestDistributorRingHandler(t *testing.T) { } func TestRequestParserWrapping(t *testing.T) { - limits := &validation.Limits{} - flagext.DefaultValues(limits) - limits.RejectOldSamples = false - distributors, _ := prepare(t, 1, 3, limits, nil) + t.Run("it calls the parser wrapper if there is one", func(t *testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.RejectOldSamples = false + distributors, _ := prepare(t, 1, 3, limits, nil) - var called bool - distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser { - called = true - return requestParser - } + var called bool + distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser { + called = true + return requestParser + } + + ctx := user.InjectOrgID(context.Background(), "test-user") + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil) + require.NoError(t, err) + + rec := httptest.NewRecorder() + distributors[0].pushHandler(rec, req, newFakeParser().parseRequest, push.HTTPError) - ctx := user.InjectOrgID(context.Background(), "test-user") - req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil) - require.NoError(t, err) + // unprocessable code because there are no streams in the request. 
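+		// Only the explicit push.ErrAllLogsFiltered path (next subtest)
+		// maps to 204; a request that parses to zero streams is still a
+		// client error.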
+		require.Equal(t, http.StatusUnprocessableEntity, rec.Code)
+		require.True(t, called)
+	})
+
+	t.Run("it returns 204 when the parser wrapper filters all log lines", func(t *testing.T) {
+		limits := &validation.Limits{}
+		flagext.DefaultValues(limits)
+		limits.RejectOldSamples = false
+		distributors, _ := prepare(t, 1, 3, limits, nil)
 
-	distributors[0].pushHandler(httptest.NewRecorder(), req, stubParser, push.HTTPError)
+		var called bool
+		distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser {
+			called = true
+			return requestParser
+		}
+
+		ctx := user.InjectOrgID(context.Background(), "test-user")
+		req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil)
+		require.NoError(t, err)
+
+		parser := newFakeParser()
+		parser.parseErr = push.ErrAllLogsFiltered
+
+		rec := httptest.NewRecorder()
+		distributors[0].pushHandler(rec, req, parser.parseRequest, push.HTTPError)
+
+		require.True(t, called)
+		require.Equal(t, http.StatusNoContent, rec.Code)
+	})
+}
+
+type fakeParser struct {
+	parseErr error
+}
 
-	require.True(t, called)
+func newFakeParser() *fakeParser {
+	return &fakeParser{}
 }
 
-func stubParser(
+func (p *fakeParser) parseRequest(
 	_ string,
 	_ *http.Request,
 	_ push.TenantsRetention,
@@ -92,5 +131,5 @@ func stubParser(
 	_ bool,
 	_ log.Logger,
 ) (*logproto.PushRequest, *push.Stats, error) {
-	return &logproto.PushRequest{}, &push.Stats{}, nil
+	return &logproto.PushRequest{}, &push.Stats{}, p.parseErr
 }
diff --git a/pkg/kafka/partitionring/consumer/client.go b/pkg/kafka/partitionring/consumer/client.go
index 2e218949f9094..8790f12441260 100644
--- a/pkg/kafka/partitionring/consumer/client.go
+++ b/pkg/kafka/partitionring/consumer/client.go
@@ -37,6 +37,7 @@ type Client struct {
 func NewGroupClient(kafkaCfg kafka.Config, partitionRing ring.PartitionRingReader, groupName string, metrics *kprom.Metrics, logger log.Logger, opts ...kgo.Opt) (*Client, error) {
 	defaultOpts := []kgo.Opt{
 		kgo.ConsumerGroup(groupName),
+		kgo.ConsumeRegex(),
 		kgo.ConsumeTopics(kafkaCfg.Topic),
 		kgo.Balancers(NewCooperativeActiveStickyBalancer(partitionRing)),
 		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go
index 759e21f293ede..37938fe2a8e89 100644
--- a/pkg/loghttp/push/push.go
+++ b/pkg/loghttp/push/push.go
@@ -11,17 +11,19 @@ import (
 	"time"
 
 	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
 
 	"github.com/grafana/loki/pkg/push"
 
+	"google.golang.org/grpc/codes"
+	grpcstatus "google.golang.org/grpc/status"
+
 	"github.com/dustin/go-humanize"
 	"github.com/go-kit/log"
 	"github.com/gogo/protobuf/proto"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/prometheus/model/labels"
-	"google.golang.org/grpc/codes"
-	grpcstatus "google.golang.org/grpc/status"
 
 	"github.com/grafana/loki/v3/pkg/analytics"
 	"github.com/grafana/loki/v3/pkg/loghttp"
@@ -66,6 +68,8 @@ const (
 	AggregatedMetricLabel = "__aggregated_metric__"
 )
 
+var ErrAllLogsFiltered = errors.New("all log lines filtered during parsing")
+
 type TenantsRetention interface {
 	RetentionPeriodFor(userID string, lbs labels.Labels) time.Duration
 }
@@ -111,7 +115,7 @@ type Stats struct {
 func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser, tracker UsageTracker, logPushRequestStreams bool) (*logproto.PushRequest, error) {
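+	// ErrAllLogsFiltered is passed through rather than returned early: the
+	// request parsed cleanly but every line was dropped, which lets the
+	// handler map this case to 204 instead of an error response.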
 	req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits, tracker, logPushRequestStreams, logger)
-	if err != nil {
+	if err != nil && !errors.Is(err, ErrAllLogsFiltered) {
 		return nil, err
 	}
 
@@ -164,7 +168,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
 	logValues = append(logValues, pushStats.Extra...)
 	level.Debug(logger).Log(logValues...)
 
-	return req, nil
+	return req, err
 }
 
 func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) {
diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go
index 78af117c80518..c52d7dea6a043 100644
--- a/pkg/logql/downstream.go
+++ b/pkg/logql/downstream.go
@@ -139,7 +139,12 @@ func (d DownstreamLogSelectorExpr) Pretty(level int) string {
 	return s
 }
 
-func (d DownstreamSampleExpr) Walk(f syntax.WalkFn) { f(d) }
+func (d DownstreamSampleExpr) Walk(f syntax.WalkFn) {
+	f(d)
+	if d.SampleExpr != nil {
+		d.SampleExpr.Walk(f)
+	}
+}
 
 var defaultMaxDepth = 4
 
@@ -173,7 +178,12 @@ func (c *ConcatSampleExpr) string(maxDepth int) string {
 
 func (c *ConcatSampleExpr) Walk(f syntax.WalkFn) {
 	f(c)
-	f(c.next)
+	if c.SampleExpr != nil {
+		c.SampleExpr.Walk(f)
+	}
+	if c.next != nil {
+		c.next.Walk(f)
+	}
 }
 
 // ConcatSampleExpr has no LogQL representation. It is expressed in the
@@ -271,7 +281,12 @@ func (e QuantileSketchEvalExpr) String() string {
 
 func (e *QuantileSketchEvalExpr) Walk(f syntax.WalkFn) {
 	f(e)
-	e.quantileMergeExpr.Walk(f)
+	if e.SampleExpr != nil {
+		e.SampleExpr.Walk(f)
+	}
+	if e.quantileMergeExpr != nil {
+		e.quantileMergeExpr.Walk(f)
+	}
 }
 
 type QuantileSketchMergeExpr struct {
@@ -297,6 +312,9 @@ func (e QuantileSketchMergeExpr) String() string {
 
 func (e *QuantileSketchMergeExpr) Walk(f syntax.WalkFn) {
 	f(e)
+	if e.SampleExpr != nil {
+		e.SampleExpr.Walk(f)
+	}
 	for _, d := range e.downstreams {
 		d.Walk(f)
 	}
@@ -326,6 +344,9 @@ func (e MergeFirstOverTimeExpr) String() string {
 
 func (e *MergeFirstOverTimeExpr) Walk(f syntax.WalkFn) {
 	f(e)
+	if e.SampleExpr != nil {
+		e.SampleExpr.Walk(f)
+	}
 	for _, d := range e.downstreams {
 		d.Walk(f)
 	}
@@ -355,6 +376,9 @@ func (e MergeLastOverTimeExpr) String() string {
 
 func (e *MergeLastOverTimeExpr) Walk(f syntax.WalkFn) {
 	f(e)
+	if e.SampleExpr != nil {
+		e.SampleExpr.Walk(f)
+	}
 	for _, d := range e.downstreams {
 		d.Walk(f)
 	}
@@ -383,6 +407,9 @@ func (e CountMinSketchEvalExpr) String() string {
 
 func (e *CountMinSketchEvalExpr) Walk(f syntax.WalkFn) {
 	f(e)
+	if e.SampleExpr != nil {
+		e.SampleExpr.Walk(f)
+	}
 	for _, d := range e.downstreams {
 		d.Walk(f)
 	}
diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go
index 3d7cb541bb9dc..c3561e75184fa 100644
--- a/pkg/logql/engine.go
+++ b/pkg/logql/engine.go
@@ -489,11 +489,10 @@ func (q *query) checkIntervalLimit(expr syntax.SampleExpr, limit time.Duration)
 	var err error
 	expr.Walk(func(e syntax.Expr) {
 		switch e := e.(type) {
-		case *syntax.RangeAggregationExpr:
-			if e.Left == nil || e.Left.Interval <= limit {
-				return
+		case *syntax.LogRange:
+			if e.Interval > limit {
+				err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Interval), model.Duration(limit))
 			}
-			err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Left.Interval), model.Duration(limit))
 		}
 	})
 	return err
diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go
index 889d06344ddbe..8a13e246f6386 100644
--- a/pkg/logql/engine_test.go
+++ b/pkg/logql/engine_test.go
@@ -38,6 +38,45 @@ var
( ErrMockMultiple = util.MultiError{ErrMock, ErrMock} ) +func TestEngine_checkIntervalLimit(t *testing.T) { + q := &query{} + for _, tc := range []struct { + query string + expErr string + }{ + {query: `rate({app="foo"} [1m])`, expErr: ""}, + {query: `rate({app="foo"} [10m])`, expErr: ""}, + {query: `max(rate({app="foo"} [5m])) - max(rate({app="bar"} [10m]))`, expErr: ""}, + {query: `rate({app="foo"} [5m]) - rate({app="bar"} [15m])`, expErr: "[15m] > [10m]"}, + {query: `rate({app="foo"} [1h])`, expErr: "[1h] > [10m]"}, + {query: `sum(rate({app="foo"} [1h]))`, expErr: "[1h] > [10m]"}, + {query: `sum_over_time({app="foo"} |= "foo" | json | unwrap bar [1h])`, expErr: "[1h] > [10m]"}, + } { + for _, downstream := range []bool{true, false} { + t.Run(fmt.Sprintf("%v/downstream=%v", tc.query, downstream), func(t *testing.T) { + expr := syntax.MustParseExpr(tc.query).(syntax.SampleExpr) + if downstream { + // Simulate downstream expression + expr = &ConcatSampleExpr{ + DownstreamSampleExpr: DownstreamSampleExpr{ + shard: nil, + SampleExpr: expr, + }, + next: nil, + } + } + err := q.checkIntervalLimit(expr, 10*time.Minute) + if tc.expErr != "" { + require.ErrorContains(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + }) + } + + } +} + func TestEngine_LogsRateUnwrap(t *testing.T) { t.Parallel() for _, test := range []struct { diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 29cb5e548ddd6..33959098d8e24 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -341,16 +341,12 @@ func (e *PipelineExpr) Shardable(topLevel bool) bool { func (e *PipelineExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - - xs := make([]Walkable, 0, len(e.MultiStages)+1) - xs = append(xs, e.Left) for _, p := range e.MultiStages { - xs = append(xs, p) + p.Walk(f) } - walkAll(f, xs...) 
} func (e *PipelineExpr) Accept(v RootVisitor) { v.VisitPipeline(e) } @@ -501,10 +497,12 @@ func (*LineFilterExpr) isStageExpr() {} func (e *LineFilterExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) + } + if e.Or != nil { + e.Or.Walk(f) } - e.Left.Walk(f) } func (e *LineFilterExpr) Accept(v RootVisitor) { @@ -1153,10 +1151,9 @@ func (r *LogRange) Shardable(topLevel bool) bool { return r.Left.Shardable(topLe func (r *LogRange) Walk(f WalkFn) { f(r) - if r.Left == nil { - return + if r.Left != nil { + r.Left.Walk(f) } - r.Left.Walk(f) } func (r *LogRange) Accept(v RootVisitor) { @@ -1476,10 +1473,9 @@ func (e *RangeAggregationExpr) Shardable(topLevel bool) bool { func (e *RangeAggregationExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *RangeAggregationExpr) Accept(v RootVisitor) { v.VisitRangeAggregation(e) } @@ -1686,10 +1682,9 @@ func (e *VectorAggregationExpr) Shardable(topLevel bool) bool { func (e *VectorAggregationExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *VectorAggregationExpr) Accept(v RootVisitor) { v.VisitVectorAggregation(e) } @@ -1806,7 +1801,13 @@ func (e *BinOpExpr) Shardable(topLevel bool) bool { } func (e *BinOpExpr) Walk(f WalkFn) { - walkAll(f, e.SampleExpr, e.RHS) + f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } + if e.RHS != nil { + e.RHS.Walk(f) + } } func (e *BinOpExpr) Accept(v RootVisitor) { v.VisitBinOp(e) } @@ -2235,10 +2236,9 @@ func (e *LabelReplaceExpr) Shardable(_ bool) bool { func (e *LabelReplaceExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *LabelReplaceExpr) Accept(v RootVisitor) { v.VisitLabelReplace(e) } diff --git a/pkg/logql/syntax/walk.go b/pkg/logql/syntax/walk.go index 291ec8b31036f..c728c55c1a38d 100644 --- a/pkg/logql/syntax/walk.go +++ b/pkg/logql/syntax/walk.go @@ -2,12 +2,6 @@ package syntax type WalkFn = func(e Expr) -func walkAll(f WalkFn, xs ...Walkable) { - for _, x := range xs { - x.Walk(f) - } -} - type Walkable interface { Walk(f WalkFn) } diff --git a/pkg/logql/syntax/walk_test.go b/pkg/logql/syntax/walk_test.go index ee536e969471f..9f0a5015ed731 100644 --- a/pkg/logql/syntax/walk_test.go +++ b/pkg/logql/syntax/walk_test.go @@ -22,7 +22,7 @@ func Test_Walkable(t *testing.T) { { desc: "bin op query", expr: `(sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m])) / sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m])))`, - want: 16, + want: 17, }, } for _, test := range tests { @@ -79,8 +79,6 @@ func Test_AppendMatchers(t *testing.T) { switch me := e.(type) { case *MatchersExpr: me.AppendMatchers(test.matchers) - default: - // Do nothing } }) require.Equal(t, test.want, expr.String()) diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go index cc280e19bd514..5055cb2f9bb75 100644 --- a/pkg/loki/common/common.go +++ b/pkg/loki/common/common.go @@ -80,7 +80,7 @@ type Storage struct { Hedging hedging.Config `yaml:"hedging"` COS ibmcloud.COSConfig `yaml:"cos"` CongestionControl congestion.Config `yaml:"congestion_control,omitempty"` - ObjectStore bucket.Config `yaml:"object_store" doc:"hidden"` + ObjectStore bucket.Config `yaml:"object_store"` } func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index d8c9be61f52f5..5dc070cb212f0 
100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -38,6 +38,7 @@ import ( "github.com/grafana/loki/v3/pkg/compactor" compactorclient "github.com/grafana/loki/v3/pkg/compactor/client" "github.com/grafana/loki/v3/pkg/compactor/deletion" + "github.com/grafana/loki/v3/pkg/dataobj/consumer" "github.com/grafana/loki/v3/pkg/dataobj/explorer" "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" @@ -89,7 +90,7 @@ type Config struct { Frontend lokifrontend.Config `yaml:"frontend,omitempty"` QueryRange queryrange.Config `yaml:"query_range,omitempty"` Ruler ruler.Config `yaml:"ruler,omitempty"` - RulerStorage rulestore.Config `yaml:"ruler_storage,omitempty" doc:"hidden"` + RulerStorage rulestore.Config `yaml:"ruler_storage,omitempty"` IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"` Ingester ingester.Config `yaml:"ingester,omitempty"` BlockBuilder blockbuilder.Config `yaml:"block_builder,omitempty"` @@ -109,6 +110,7 @@ type Config struct { TableManager index.TableManagerConfig `yaml:"table_manager,omitempty"` MemberlistKV memberlist.KVConfig `yaml:"memberlist"` KafkaConfig kafka.Config `yaml:"kafka_config,omitempty" category:"experimental"` + DataObjConsumer consumer.Config `yaml:"dataobj_consumer,omitempty" category:"experimental"` DataObjExplorer explorer.Config `yaml:"dataobj_explorer,omitempty" category:"experimental"` RuntimeConfig runtimeconfig.Config `yaml:"runtime_config,omitempty"` @@ -192,6 +194,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.BlockBuilder.RegisterFlags(f) c.BlockScheduler.RegisterFlags(f) c.DataObjExplorer.RegisterFlags(f) + c.DataObjConsumer.RegisterFlags(f) } func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { @@ -307,6 +310,9 @@ func (c *Config) Validate() error { if err := c.KafkaConfig.Validate(); err != nil { errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid kafka_config config")) } + if err := c.DataObjConsumer.Validate(); err != nil { + errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid dataobj_consumer config")) + } } if err := c.Distributor.Validate(); err != nil { errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid distributor config")) @@ -390,6 +396,7 @@ type Loki struct { partitionRing *ring.PartitionInstanceRing blockBuilder *blockbuilder.BlockBuilder blockScheduler *blockscheduler.BlockScheduler + dataObjConsumer *consumer.Service ClientMetrics storage.ClientMetrics deleteClientMetrics *deletion.DeleteRequestClientMetrics @@ -707,6 +714,8 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(BlockBuilder, t.initBlockBuilder) mm.RegisterModule(BlockScheduler, t.initBlockScheduler) mm.RegisterModule(DataObjExplorer, t.initDataObjExplorer) + mm.RegisterModule(DataObjConsumer, t.initDataObjConsumer) + mm.RegisterModule(All, nil) mm.RegisterModule(Read, nil) mm.RegisterModule(Write, nil) @@ -746,6 +755,7 @@ func (t *Loki) setupModuleManager() error { BlockBuilder: {PartitionRing, Store, Server}, BlockScheduler: {Server}, DataObjExplorer: {Server}, + DataObjConsumer: {PartitionRing, Server}, Read: {QueryFrontend, Querier}, Write: {Ingester, Distributor, PatternIngester}, diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 2bdddb33cc186..540b25efe9f59 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -51,6 +51,7 @@ import ( "github.com/grafana/loki/v3/pkg/compactor/client/grpc" "github.com/grafana/loki/v3/pkg/compactor/deletion" "github.com/grafana/loki/v3/pkg/compactor/generationnumber" + 
"github.com/grafana/loki/v3/pkg/dataobj/consumer" "github.com/grafana/loki/v3/pkg/dataobj/explorer" "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" @@ -144,11 +145,11 @@ const ( BlockBuilder = "block-builder" BlockScheduler = "block-scheduler" DataObjExplorer = "dataobj-explorer" - - All = "all" - Read = "read" - Write = "write" - Backend = "backend" + DataObjConsumer = "dataobj-consumer" + All = "all" + Read = "read" + Write = "write" + Backend = "backend" ) const ( @@ -1905,6 +1906,34 @@ func (t *Loki) initDataObjExplorer() (services.Service, error) { return explorer, nil } +func (t *Loki) initDataObjConsumer() (services.Service, error) { + if !t.Cfg.Ingester.KafkaIngestion.Enabled { + return nil, nil + } + schema, err := t.Cfg.SchemaConfig.SchemaForTime(model.Now()) + if err != nil { + return nil, fmt.Errorf("failed to get schema for now: %w", err) + } + + store, err := bucket.NewClient(context.Background(), schema.ObjectType, t.Cfg.StorageConfig.ObjectStore.Config, "dataobj", util_log.Logger) + if err != nil { + return nil, err + } + level.Info(util_log.Logger).Log("msg", "initializing dataobj consumer", "instance", t.Cfg.Ingester.LifecyclerConfig.ID) + t.dataObjConsumer = consumer.New( + t.Cfg.KafkaConfig, + t.Cfg.DataObjConsumer, + t.Cfg.Distributor.TenantTopic.TopicPrefix, + store, + t.Cfg.Ingester.LifecyclerConfig.ID, + t.partitionRing, + prometheus.DefaultRegisterer, + util_log.Logger, + ) + + return t.dataObjConsumer, nil +} + func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLimits) (deletion.DeleteRequestsClient, error) { if !t.supportIndexDeleteRequest() || !t.Cfg.CompactorConfig.RetentionEnabled { return deletion.NewNoOpDeleteRequestsStore(), nil diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 8aa66e8419e61..bafe57096de98 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -1464,7 +1464,7 @@ func (f fakeLimits) MaxQueryLength(context.Context, string) time.Duration { } func (f fakeLimits) MaxQueryRange(context.Context, string) time.Duration { - return time.Second + return time.Hour } func (f fakeLimits) MaxQueryParallelism(context.Context, string) int { diff --git a/pkg/ruler/rulestore/config.go b/pkg/ruler/rulestore/config.go index 334e43de0917d..a4480bb112bf4 100644 --- a/pkg/ruler/rulestore/config.go +++ b/pkg/ruler/rulestore/config.go @@ -2,7 +2,9 @@ package rulestore import ( "flag" + "fmt" "reflect" + "strings" "github.com/grafana/dskit/flagext" @@ -23,7 +25,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ExtraBackends = []string{local.Name} cfg.Local.RegisterFlagsWithPrefix(prefix, f) - f.StringVar(&cfg.Backend, prefix+"backend", "filesystem", "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.") + f.StringVar(&cfg.Backend, prefix+"backend", "filesystem", fmt.Sprintf("Backend storage to use. 
Supported backends are: local, %s", strings.Join(bucket.SupportedBackends, ", "))) cfg.RegisterFlagsWithPrefix(prefix, f) } diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index 8be1a45d35c21..be87b5035f842 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -102,6 +102,7 @@ func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHead if err != nil { return nil, errors.Wrap(err, "getting decompressor") } + defer b.opts.Schema.DecompressorPool().PutReader(decompressor) decompressed := make([]byte, header.DecompressedLen) if _, err = io.ReadFull(decompressor, decompressed); err != nil { diff --git a/pkg/storage/bucket/prefixed_bucket_client.go b/pkg/storage/bucket/prefixed_bucket_client.go index c2d6cd0f422c6..e26edf80125c9 100644 --- a/pkg/storage/bucket/prefixed_bucket_client.go +++ b/pkg/storage/bucket/prefixed_bucket_client.go @@ -36,6 +36,11 @@ func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Rea return } +// GetAndReplace is a helper function that gets an object from the bucket and replaces it with a new reader. +func (b *PrefixedBucketClient) GetAndReplace(ctx context.Context, name string, fn func(existing io.Reader) (io.Reader, error)) error { + return b.bucket.GetAndReplace(ctx, b.fullName(name), fn) +} + // Delete removes the object with the given name. func (b *PrefixedBucketClient) Delete(ctx context.Context, name string) error { return b.bucket.Delete(ctx, b.fullName(name)) diff --git a/pkg/storage/bucket/sse_bucket_client.go b/pkg/storage/bucket/sse_bucket_client.go index a613561c078b7..b6dd19fdbedd6 100644 --- a/pkg/storage/bucket/sse_bucket_client.go +++ b/pkg/storage/bucket/sse_bucket_client.go @@ -59,6 +59,10 @@ func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) return b.bucket.Upload(ctx, name, r) } +func (b *SSEBucketClient) GetAndReplace(ctx context.Context, name string, fn func(existing io.Reader) (io.Reader, error)) error { + return b.bucket.GetAndReplace(ctx, name, fn) +} + // Delete implements objstore.Bucket. func (b *SSEBucketClient) Delete(ctx context.Context, name string) error { return b.bucket.Delete(ctx, name) diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index e0477ab1e7acb..a80830de360b3 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -295,8 +295,8 @@ type Config struct { DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"` MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"` - UseThanosObjstore bool `yaml:"use_thanos_objstore" doc:"hidden"` - ObjectStore bucket.ConfigWithNamedStores `yaml:"object_store" doc:"hidden"` + UseThanosObjstore bool `yaml:"use_thanos_objstore"` + ObjectStore bucket.ConfigWithNamedStores `yaml:"object_store"` MaxChunkBatchSize int `yaml:"max_chunk_batch_size"` BoltDBShipperConfig boltdb.IndexCfg `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. 
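Both bucket wrappers forward the new `GetAndReplace` primitive so read-modify-write updates keep working through decoration: `PrefixedBucketClient` rewrites the key, while `SSEBucketClient` simply delegates. A hedged usage sketch, assuming `client` is any bucket exposing this method; the key and mutation below are illustrative, and behavior for a missing object is not specified by this diff:

```go
// fn receives the current contents and returns the reader whose bytes
// replace them under the same (prefixed) key.
err := client.GetAndReplace(ctx, "tenant/overrides.json", func(existing io.Reader) (io.Reader, error) {
	current, err := io.ReadAll(existing)
	if err != nil {
		return nil, err
	}
	updated := append(current, '\n') // apply the real modification here
	return bytes.NewReader(updated), nil
})
if err != nil {
	return err
}
```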
Required fields only required when boltdb-shipper is defined in config."` diff --git a/production/docker/docker-compose.yaml b/production/docker/docker-compose.yaml index a27cccdaf8f83..613608b0906e8 100644 --- a/production/docker/docker-compose.yaml +++ b/production/docker/docker-compose.yaml @@ -24,7 +24,7 @@ services: - loki grafana: - image: grafana/grafana:11.5.0 + image: grafana/grafana:11.5.1 ports: - "3000:3000" environment: diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index fae999b8aded7..7c7ac4345ced7 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -86,6 +86,9 @@ topology_spread_max_skew: 1, }, + // Use thanos object store clients + use_thanos_objstore: false, + // GCS variables gcs_bucket_name: error 'must specify GCS bucket name', @@ -94,6 +97,7 @@ s3_secret_access_key: '', s3_address: error 'must specify s3_address', s3_bucket_name: error 'must specify s3_bucket_name', + s3_bucket_region: '', s3_path_style: false, // Azure variables @@ -104,28 +108,55 @@ // DNS Resolver dns_resolver: 'kube-dns.kube-system.svc.cluster.local', - client_configs: { - s3: { - s3forcepathstyle: $._config.s3_path_style, - } + ( - if $._config.s3_access_key != '' then { - s3: 's3://' + $._config.s3_access_key + ':' + $._config.s3_secret_access_key + '@' + $._config.s3_address + '/' + $._config.s3_bucket_name, - } else { - s3: 's3://' + $._config.s3_address + '/' + $._config.s3_bucket_name, - } - ), - gcs: { - bucket_name: $._config.gcs_bucket_name, - }, - azure: { - container_name: $._config.azure_container_name, - account_name: $._config.azure_account_name, - } + ( - if $._config.azure_account_key != '' then { - account_key: $._config.azure_account_key, - } else {} - ), - }, + object_store_config: + if $._config.storage_backend == 'gcs' then { + gcs: { + bucket_name: $._config.gcs_bucket_name, + }, + } else if $._config.storage_backend == 's3' then { + aws: { + s3forcepathstyle: $._config.s3_path_style, + } + ( + if $._config.s3_access_key != '' then { + s3: 's3://' + $._config.s3_access_key + ':' + $._config.s3_secret_access_key + '@' + $._config.s3_address + '/' + $._config.s3_bucket_name, + } else { + s3: 's3://' + $._config.s3_address + '/' + $._config.s3_bucket_name, + } + ), + } else if $._config.storage_backend == 'azure' then { + azure: { + container_name: $._config.azure_container_name, + account_name: $._config.azure_account_name, + } + ( + if $._config.azure_account_key != '' then { + account_key: $._config.azure_account_key, + } else {} + ), + } else {}, + + // thanos object store config + thanos_object_store_config: + if $._config.storage_backend == 'gcs' then { + gcs: $._config.object_store_config.gcs, + } else if $._config.storage_backend == 's3' then { + s3: { + bucket_name: $._config.s3_bucket_name, + endpoint: $._config.s3_address, + } + ( + if $._config.s3_access_key != '' && $._config.s3_secret_access_key != '' then { + access_key_id: $._config.s3_access_key, + secret_access_key: $._config.s3_secret_access_key, + } + else {} + ) + ( + if $._config.s3_bucket_region != '' then { + region: $._config.s3_bucket_region, + } + else {} + ), + } else if $._config.storage_backend == 'azure' then { + azure: $._config.object_store_config.azure, + } else {}, // December 11 is when we first launched to the public. // Assume we can ingest logs that are 5months old. 
@@ -282,19 +313,11 @@ consistent_hash: true, }, }, - } + ( - if $._config.storage_backend == 'gcs' then { - gcs: $._config.client_configs.gcs, - } else {} - ) + - ( - if $._config.storage_backend == 's3' then { - aws: $._config.client_configs.s3, - } else {} - ) + + } + $._config.object_store_config + ( - if $._config.storage_backend == 'azure' then { - azure: $._config.client_configs.azure, + if $._config.use_thanos_objstore then { + use_thanos_objstore: true, + object_store: $._config.thanos_object_store_config, } else {} ), @@ -364,6 +387,10 @@ }, } else {}, + ruler_storage: if $._config.ruler_enabled then { + backend: $._config.storage_backend, + } + $._config.thanos_object_store_config else {}, + }, }, diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json b/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json new file mode 100644 index 0000000000000..d81f10f9dd3a6 --- /dev/null +++ b/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "loki" + ], + "targetBlank": false, + "title": "Loki Dashboards", + "type": "dashboards" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 1, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operations_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "RPS / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 2, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "Error rate / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + 
"unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 3, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{cluster=\"$cluster\", namespace=~\"$namespace\", status_code!~\"2..\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{method}} - {{status_code}}", + "legendLink": null + } + ], + "title": "Transport error rate / method and status code", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Operations", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 4, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Get", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 5, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": 
"A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: GetRange", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 6, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Exists", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 7, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + 
"legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Attributes", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 8, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Upload", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + 
"thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 9, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Delete", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "loki" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ ], + "query": "label_values(loki_build_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ ], + "query": "label_values(loki_build_info{cluster=~\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Loki / Object Store Thanos", + "uid": "object-store", + "version": 0 + } \ No newline at end of file diff --git a/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json b/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json new file 
mode 100644 index 0000000000000..d81f10f9dd3a6 --- /dev/null +++ b/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "loki" + ], + "targetBlank": false, + "title": "Loki Dashboards", + "type": "dashboards" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 1, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operations_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "RPS / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 2, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "Error rate / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 3, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{cluster=\"$cluster\", namespace=~\"$namespace\", status_code!~\"2..\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{method}} - {{status_code}}", + "legendLink": null + } + ], + "title": "Transport error rate / method and status code", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Operations", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + 
"custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 4, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Get", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 5, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: GetRange", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 6, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Exists", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 7, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) * 1e3 / 
sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Attributes", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 8, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Upload", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 9, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": 
"B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Delete", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "loki" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ ], + "query": "label_values(loki_build_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ ], + "query": "label_values(loki_build_info{cluster=~\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Loki / Object Store Thanos", + "uid": "object-store", + "version": 0 + } \ No newline at end of file diff --git a/production/loki-mixin/.lint b/production/loki-mixin/.lint index d8c2ddc8b956e..31c3b84367242 100644 --- a/production/loki-mixin/.lint +++ b/production/loki-mixin/.lint @@ -14,6 +14,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" template-datasource-rule: reason: "Based on new convention we are using variable names prometheus_datasource and loki_datasource where as linter expects 'datasource'" entries: @@ -29,6 +30,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" template-instance-rule: reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" entries: @@ -44,6 +46,7 @@ exclusions: - dashboard: "Loki / Writes Resources" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" target-instance-rule: reason: 
"These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" entries: @@ -59,6 +62,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" target-job-rule: reason: "We don't have/need a job template selector for this dashboard" entries: @@ -74,6 +78,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" target-promql-rule: reason: "The following are logql queries, not promql" entries: diff --git a/production/loki-mixin/dashboards.libsonnet b/production/loki-mixin/dashboards.libsonnet index cb1b5d5161778..217be8487f2f5 100644 --- a/production/loki-mixin/dashboards.libsonnet +++ b/production/loki-mixin/dashboards.libsonnet @@ -11,4 +11,5 @@ (import 'dashboards/loki-canary-dashboard.libsonnet') + (import 'dashboards/recording-rules.libsonnet') + (import 'dashboards/loki-bloom-build.libsonnet') + -(import 'dashboards/loki-bloom-gateway.libsonnet') +(import 'dashboards/loki-bloom-gateway.libsonnet') + +(import 'dashboards/loki-object-store.libsonnet') diff --git a/production/loki-mixin/dashboards/loki-object-store.libsonnet b/production/loki-mixin/dashboards/loki-object-store.libsonnet new file mode 100644 index 0000000000000..e5de659828c5f --- /dev/null +++ b/production/loki-mixin/dashboards/loki-object-store.libsonnet @@ -0,0 +1,72 @@ +local grafana = import 'grafonnet/grafana.libsonnet'; +local row = grafana.row; + +{ + grafanaDashboards+:: { + local cluster_namespace_matcher = 'cluster="$cluster", namespace=~"$namespace"', + local dashboard = ( + (import 'dashboard-utils.libsonnet') + { + _config+:: $._config, + } + ), + 'loki_thanos_object_storage.json': + dashboard.dashboard('Loki / Object Store Thanos', uid='object-store') + .addCluster() + .addNamespace() + .addTag() + .addRow( + row.new('Operations') + .addPanel( + $.newQueryPanel('RPS / operation', 'reqps') + + $.queryPanel( + 'sum by(operation) (rate(loki_objstore_bucket_operations_total{%s}[$__rate_interval]))' % cluster_namespace_matcher, + '{{operation}}' + ) + ) + .addPanel( + $.newQueryPanel('Error rate / operation', 'reqps') + + $.queryPanel( + 'sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{%s}[$__rate_interval])) > 0' % cluster_namespace_matcher, + '{{operation}}' + ) + ) + .addPanel( + $.newQueryPanel('Transport error rate / method and status code', 'reqps') + + $.queryPanel( + 'sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{%s, status_code!~"2.."}[$__rate_interval])) > 0' % cluster_namespace_matcher, + '{{method}} - {{status_code}}' + ) + ) + ) + .addRow( + row.new('') + .addPanel( + $.newQueryPanel('Op: Get', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="get"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: GetRange', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="get_range"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: Exists', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="exists"}' % cluster_namespace_matcher) + ) + ) + .addRow( + row.new('') + .addPanel( + $.newQueryPanel('Op: Attributes', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="attributes"}' % cluster_namespace_matcher) + ) + 
.addPanel( + $.newQueryPanel('Op: Upload', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="upload"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: Delete', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="delete"}' % cluster_namespace_matcher) + ) + ), + }, +} diff --git a/production/terraform/modules/s3/versions.tf b/production/terraform/modules/s3/versions.tf index de0769f7a1f35..0d1740bd76ed7 100644 --- a/production/terraform/modules/s3/versions.tf +++ b/production/terraform/modules/s3/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 5.84.0" + version = "~> 5.85.0" } random = { diff --git a/tools/bigtable-backup/requirements.txt b/tools/bigtable-backup/requirements.txt index c7bb1c11d1df1..2c55a2546056b 100644 --- a/tools/bigtable-backup/requirements.txt +++ b/tools/bigtable-backup/requirements.txt @@ -1,2 +1,2 @@ -e git://github.com/prometheus/client_python.git@a8f5c80f651ea570577c364203e0edbef67db727#egg=prometheus_client -pytz==2024.2 +pytz==2025.1 diff --git a/tools/doc-generator/parse/parser.go b/tools/doc-generator/parse/parser.go index f565bf2dc9c90..524950cb6ed9e 100644 --- a/tools/doc-generator/parse/parser.go +++ b/tools/doc-generator/parse/parser.go @@ -80,6 +80,7 @@ type ConfigEntry struct { Block *ConfigBlock BlockDesc string Root bool + Inline bool // In case the Kind is KindField FieldFlag string @@ -228,7 +229,25 @@ func config(block *ConfigBlock, cfg interface{}, flags map[uintptr]*flag.Flag, r blocks = append(blocks, subBlock) } } else { - subBlock = block + // For inline fields, we still want to add them to the root blocks list + if isRoot { + subBlock = &ConfigBlock{ + Name: rootName, + Desc: getFieldDescription(cfg, field, rootDesc), + } + blocks = append(blocks, subBlock) + + // Add a field entry that references the root block + block.Add(&ConfigEntry{ + Kind: KindBlock, + Block: subBlock, + BlockDesc: subBlock.Desc, + Root: true, + Inline: true, + }) + } else { + subBlock = block + } } if field.Type.Kind() == reflect.Ptr { diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go index 12d8b44d2a7b6..b5c13e0109c76 100644 --- a/tools/doc-generator/parse/root_blocks.go +++ b/tools/doc-generator/parse/root_blocks.go @@ -30,11 +30,10 @@ import ( "github.com/grafana/loki/v3/pkg/querier/queryrange" querier_worker "github.com/grafana/loki/v3/pkg/querier/worker" "github.com/grafana/loki/v3/pkg/ruler" - "github.com/grafana/loki/v3/pkg/ruler/rulestore" "github.com/grafana/loki/v3/pkg/runtime" "github.com/grafana/loki/v3/pkg/scheduler" "github.com/grafana/loki/v3/pkg/storage" - "github.com/grafana/loki/v3/pkg/storage/bucket/gcs" + "github.com/grafana/loki/v3/pkg/storage/bucket" "github.com/grafana/loki/v3/pkg/storage/chunk/cache" "github.com/grafana/loki/v3/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/v3/pkg/storage/chunk/client/aws" @@ -298,15 +297,9 @@ Named store from this example can be used by setting object_store to store-1 in Desc: "Define actions for matching OpenTelemetry (OTEL) attributes.", }, { - Name: "gcs_storage_backend", - StructType: []reflect.Type{reflect.TypeOf(gcs.Config{})}, - Desc: "The gcs_storage_backend block configures the connection to Google Cloud Storage object storage backend.", - }, - { - Name: "ruler_storage_config", - StructType: []reflect.Type{reflect.TypeOf(rulestore.Config{})}, - Desc: `The ruler_storage_config 
configures ruler storage backend. -It uses thanos-io/objstore clients for connecting to object storage backends. This will become the default way of configuring object store clients in future releases. + Name: "thanos_object_store_config", + StructType: []reflect.Type{reflect.TypeOf(bucket.Config{})}, + Desc: `The thanos_object_store_config block configures the connection to object storage backend using thanos-io/objstore clients. This will become the default way of configuring object store clients in future releases. Currently this is opt-in and takes effect only when ` + "`-use-thanos-objstore` " + "is set to true.", }, } diff --git a/tools/doc-generator/writer.go b/tools/doc-generator/writer.go index 7a04c891ed2bc..58afa0ac2dc2f 100644 --- a/tools/doc-generator/writer.go +++ b/tools/doc-generator/writer.go @@ -52,7 +52,11 @@ func (w *specWriter) writeConfigEntry(e *parse.ConfigEntry, indent int) (written } // Block reference without entries, because it's a root block - w.out.WriteString(pad(indent) + "[" + e.Name + ": <" + e.Block.Name + ">]\n") + if e.Inline { + w.out.WriteString(pad(indent) + "[<" + e.Block.Name + ">]\n") + } else { + w.out.WriteString(pad(indent) + "[" + e.Name + ": <" + e.Block.Name + ">]\n") + } } else { // Description w.writeComment(e.BlockDesc, indent, 0) diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf index 97ed5eddd5d5b..368f0865ed987 100644 --- a/tools/gcplog/main.tf +++ b/tools/gcplog/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.18.1" + version = "6.19.0" } } } diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 46b39cf0feced..e28f71cc05f84 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -6,9 +6,9 @@ toolchain go1.23.5 require ( github.com/aws/aws-lambda-go v1.47.0 - github.com/aws/aws-sdk-go-v2 v1.35.0 - github.com/aws/aws-sdk-go-v2/config v1.29.3 - github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1 + github.com/aws/aws-sdk-go-v2 v1.36.0 + github.com/aws/aws-sdk-go-v2/config v1.29.4 + github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 @@ -27,19 +27,19 @@ require ( github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.56 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.57 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11 // indirect - 
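With the parser change above, an inline root block (a field hoisted to the top level without its own YAML key, like the `object_store` sections unhidden in this PR) gets its own `ConfigEntry` carrying `Inline: true`, and the writer renders it as a bare block reference. An assumed illustration of the two output shapes; the entry values are hypothetical:

```go
// Hypothetical ConfigEntry for an inline root block.
e := &parse.ConfigEntry{
	Kind:   parse.KindBlock,
	Name:   "object_store",
	Root:   true,
	Inline: true,
	Block:  &parse.ConfigBlock{Name: "thanos_object_store_config"},
}
// specWriter output, inline:     [<thanos_object_store_config>]
// non-inline equivalent:         [object_store: <thanos_object_store_config>]
```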
github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 // indirect github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index e77c7161fe843..39e2e96f5926d 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -48,40 +48,40 @@ github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1s github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.35.0 h1:jTPxEJyzjSuuz0wB+302hr8Eu9KUI+Zv8zlujMGJpVI= -github.com/aws/aws-sdk-go-v2 v1.35.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= -github.com/aws/aws-sdk-go-v2/config v1.29.3 h1:a5Ucjxe6iV+LHEBmYA9w40rT5aGxWybx/4l/O/fvJlE= -github.com/aws/aws-sdk-go-v2/config v1.29.3/go.mod h1:pt9z1x12zDiDb4iFLrxoeAKLVCU/Gp9DL/5BnwlY77o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.56 h1:JKMBreKudV+ozx6rZJLvEtiexv48aEdhdC7mXUw9MLs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.56/go.mod h1:S3xRjIHD8HHFgMTz4L56q/7IldfNtGL9JjH/vP3U6DA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 h1:XMBqBEuZLf8yxtH+mU/uUDyQbN4iD/xv9h6he2+lzhw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26/go.mod h1:d0+wQ/3CYGPuHEfBTPpQdfUX7gjk0/Lxs5Q6KzdEGY8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 h1:+7AzSGNhHoY53di13lvztf9Dyd/9ofzoYGBllkWp3a0= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30/go.mod h1:Jxd/FrCny99yURiQiMywgXvBhd7tmgdv6KdlUTNzMSo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 h1:Ex06eY6I5rO7IX0HalGfa5nGjpBoOsS1Qm3xfjkuszs= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30/go.mod h1:AvyEMA9QcX59kFhVizBpIBpEMThUTXssuJe+emBdcGM= +github.com/aws/aws-sdk-go-v2/config v1.29.4 h1:ObNqKsDYFGr2WxnoXKOhCvTlf3HhwtoGgc+KmZ4H5yg= +github.com/aws/aws-sdk-go-v2/config v1.29.4/go.mod h1:j2/AF7j/qxVmsNIChw1tWfsVKOayJoGRDjg1Tgq7NPk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.57 h1:kFQDsbdBAR3GZsB8xA+51ptEnq9TIj3tS4MuP5b+TcQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.57/go.mod h1:2kerxPUUbTagAr/kkaHiqvj/bcYHzi2qiJS/ZinllU0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod 
h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30 h1:yQSv0NQ4CRHoki6AcV/Ldoa4/QCMJauZkF23qznBCPQ= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30/go.mod h1:jH3z32wDrsducaYX26xnl41ksYFWqjHphIciwIANZkc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4 h1:iwk7v5+lUtA0cIQcQM6EyCXtQJZ9MGIWWaf0JKud5UE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4/go.mod h1:o9mSr0x1NwImSmP9q38aTUhjYwcDm277YUURBjXcC2I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 h1:5JKQ2J3BBW4ovy6A/5Lwx9SpA6IzgH8jB3bquGZ1NUw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11/go.mod h1:VShCk7rfCzK/b9U1aSkzLwcOoaDlYna16482QqEavis= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11 h1:P8qJcYGVDswlMkVFhMi7SJmlf0jNA0JRbvE/q2PuXD8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11/go.mod h1:9yp5x5vYwyhnZZ9cKLBxZmrJTGv99C9iVmG7AKeUvdc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1 h1:hbTWOPUgAnPpk5+G1jZjYnq4eKCAePwRJEqLN1Tj7Bg= -github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1/go.mod h1:Mo2xdnRzOyZQkGHEbhOgooG0eIV+GqS/g8LU4B5iftI= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 h1:q4pOAKxypbFoUJzOpgo939bF50qb4DgYshiDfcsdN0M= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.13/go.mod h1:G/0PTg7+vQT42ictQGjJhixzTcVZtHFvrN/OeTXrRfQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 h1:4sGSGshSSfO1vrcXruPick3ioSf8nhhD6nuB2ni37P4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12/go.mod h1:NHpu/pLOelViA4qxkAFH10VLqh+XeLhZfXDaFyMVgSs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 h1:RIXOjp7Dp4siCYJRwBHUcBdVgOWflSJGlq4ZhMI5Ta0= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.11/go.mod h1:ZR17k9bPKPR8u0IkyA6xVsjr56doNQ4ZB1fs7abYBfE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod 
h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 h1:dyC+iA2+Yc7iDMDh0R4eT6fi8TgBduc+BOWCy6Br0/o= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 h1:fqg6c1KVrc3SYWma/egWue5rKI4G2+M4wMQN2JosNAA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.12/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json index f7c0ab189278d..559307940020c 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,16 +1,16 @@ { - "auth": "0.13.0", - "auth/oauth2adapt": "0.2.6", - "bigquery": "1.65.0", - "bigtable": "1.33.0", + "auth": "0.14.1", + "auth/oauth2adapt": "0.2.7", + "bigquery": "1.66.0", + "bigtable": "1.35.0", "datastore": "1.20.0", "errorreporting": "0.3.2", - "firestore": "1.17.0", + "firestore": "1.18.0", "logging": "1.13.0", "profiler": "0.4.2", - "pubsub": "1.45.3", + "pubsub": "1.46.0", "pubsublite": "1.8.2", - "spanner": "1.73.0", - "storage": "1.49.0", + "spanner": "1.74.0", + "storage": "1.50.0", "vertexai": "0.13.3" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 0c8cc178b7419..511ad2018e7f3 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,154 +1,155 @@ { - "accessapproval": "1.8.2", - "accesscontextmanager": "1.9.2", - "advisorynotifications": "1.5.2", - "ai": "0.9.0", - "aiplatform": "1.69.0", - "alloydb": "1.14.0", - "analytics": "0.25.2", - "apigateway": "1.7.2", - "apigeeconnect": "1.7.2", - "apigeeregistry": "0.9.2", - "apihub": "0.1.2", - "apikeys": "1.2.2", - "appengine": "1.9.2", - "apphub": "0.2.2", - "apps": "0.5.2", - "area120": "0.9.2", - "artifactregistry": "1.16.0", - "asset": "1.20.3", - "assuredworkloads": "1.12.2", - "automl": "1.14.3", - "backupdr": "1.2.1", - "baremetalsolution": "1.3.2", - "batch": "1.11.4", - "beyondcorp": "1.1.2", - "billing": "1.20.0", - "binaryauthorization": "1.9.2", - "certificatemanager": "1.9.2", - "channel": "1.19.1", - "chat": "0.9.0", - "cloudbuild": "1.19.1", - "cloudcontrolspartner": "1.2.1", - "clouddms": "1.8.2", - "cloudprofiler": "0.4.2", - "cloudquotas": "1.2.0", - "cloudtasks": "1.13.2", - "commerce": "1.2.1", - "compute": "1.31.0", + "accessapproval": "1.8.3", + "accesscontextmanager": "1.9.3", + "advisorynotifications": "1.5.3", + "ai": "0.10.0", + "aiplatform": "1.70.0", + "alloydb": "1.14.1", + "analytics": "0.25.3", + 
"apigateway": "1.7.3", + "apigeeconnect": "1.7.3", + "apigeeregistry": "0.9.3", + "apihub": "0.1.3", + "apikeys": "1.2.3", + "appengine": "1.9.3", + "apphub": "0.2.3", + "apps": "0.5.3", + "area120": "0.9.3", + "artifactregistry": "1.16.1", + "asset": "1.20.4", + "assuredworkloads": "1.12.3", + "automl": "1.14.4", + "backupdr": "1.3.0", + "baremetalsolution": "1.3.3", + "batch": "1.11.5", + "beyondcorp": "1.1.3", + "billing": "1.20.1", + "binaryauthorization": "1.9.3", + "certificatemanager": "1.9.3", + "channel": "1.19.2", + "chat": "0.9.1", + "cloudbuild": "1.20.0", + "cloudcontrolspartner": "1.2.2", + "clouddms": "1.8.3", + "cloudprofiler": "0.4.3", + "cloudquotas": "1.3.0", + "cloudtasks": "1.13.3", + "commerce": "1.2.2", + "compute": "1.31.1", "compute/metadata": "0.6.0", - "confidentialcomputing": "1.8.0", - "config": "1.2.0", - "contactcenterinsights": "1.17.0", - "container": "1.42.0", - "containeranalysis": "0.13.2", - "datacatalog": "1.24.1", - "dataflow": "0.10.2", - "dataform": "0.10.2", - "datafusion": "1.8.2", - "datalabeling": "0.9.2", - "dataplex": "1.20.0", - "dataproc": "2.10.0", - "dataqna": "0.9.2", - "datastream": "1.12.0", - "deploy": "1.26.0", - "developerconnect": "0.3.0", - "dialogflow": "1.64.0", - "discoveryengine": "1.16.0", - "dlp": "1.20.0", - "documentai": "1.35.0", - "domains": "0.10.2", - "edgecontainer": "1.4.0", - "edgenetwork": "1.2.2", - "essentialcontacts": "1.7.2", - "eventarc": "1.15.0", - "filestore": "1.9.2", - "functions": "1.19.2", - "gkebackup": "1.6.2", - "gkeconnect": "0.12.0", - "gkehub": "0.15.2", - "gkemulticloud": "1.4.1", - "grafeas": "0.3.12", - "gsuiteaddons": "1.7.2", - "iam": "1.3.0", - "iap": "1.10.2", - "identitytoolkit": "0.2.2", - "ids": "1.5.2", - "iot": "1.8.2", - "kms": "1.20.3", - "language": "1.14.2", - "lifesciences": "0.10.2", - "longrunning": "0.6.3", - "managedidentities": "1.7.2", - "managedkafka": "0.3.0", - "maps": "1.17.0", - "mediatranslation": "0.9.2", - "memcache": "1.11.2", - "memorystore": "0.1.0", - "metastore": "1.14.2", - "migrationcenter": "1.1.2", - "monitoring": "1.22.0", - "netapp": "1.5.0", - "networkconnectivity": "1.16.0", - "networkmanagement": "1.17.0", - "networksecurity": "0.10.2", - "networkservices": "0.2.2", - "notebooks": "1.12.2", - "optimization": "1.7.2", - "oracledatabase": "0.1.2", - "orchestration": "1.11.2", - "orgpolicy": "1.14.1", - "osconfig": "1.14.2", - "oslogin": "1.14.2", - "parallelstore": "0.9.0", - "phishingprotection": "0.9.2", - "policysimulator": "0.3.2", - "policytroubleshooter": "1.11.2", - "privatecatalog": "0.10.2", - "privilegedaccessmanager": "0.2.2", - "rapidmigrationassessment": "1.1.2", - "recaptchaenterprise": "2.19.1", - "recommendationengine": "0.9.2", - "recommender": "1.13.2", - "redis": "1.17.2", - "resourcemanager": "1.10.2", - "resourcesettings": "1.8.2", - "retail": "1.19.1", - "run": "1.8.0", - "scheduler": "1.11.2", - "secretmanager": "1.14.2", - "securesourcemanager": "1.3.0", - "security": "1.18.2", - "securitycenter": "1.35.2", - "securitycentermanagement": "1.1.2", - "securityposture": "0.2.2", - "servicecontrol": "1.14.2", - "servicedirectory": "1.12.2", - "servicehealth": "1.2.0", - "servicemanagement": "1.10.2", - "serviceusage": "1.9.2", - "shell": "1.8.2", - "shopping": "0.14.0", - "speech": "1.25.2", - "storageinsights": "1.1.2", - "storagetransfer": "1.12.0", - "streetview": "0.2.2", - "support": "1.1.2", - "talent": "1.7.2", - "telcoautomation": "1.1.2", - "texttospeech": "1.10.0", - "tpu": "1.7.2", - "trace": "1.11.2", - "translate": "1.12.2", 
- "video": "1.23.2", - "videointelligence": "1.12.2", - "vision": "2.9.2", - "visionai": "0.4.2", - "vmmigration": "1.8.2", - "vmwareengine": "1.3.2", - "vpcaccess": "1.8.2", - "webrisk": "1.10.2", - "websecurityscanner": "1.7.2", - "workflows": "1.13.2", - "workstations": "1.1.2" + "confidentialcomputing": "1.8.1", + "config": "1.3.0", + "contactcenterinsights": "1.17.1", + "container": "1.42.1", + "containeranalysis": "0.13.3", + "datacatalog": "1.24.3", + "dataflow": "0.10.3", + "dataform": "0.10.3", + "datafusion": "1.8.3", + "datalabeling": "0.9.3", + "dataplex": "1.21.0", + "dataproc": "2.10.1", + "dataqna": "0.9.3", + "datastream": "1.12.1", + "deploy": "1.26.1", + "developerconnect": "0.3.1", + "dialogflow": "1.64.1", + "discoveryengine": "1.16.1", + "dlp": "1.20.1", + "documentai": "1.35.1", + "domains": "0.10.3", + "edgecontainer": "1.4.1", + "edgenetwork": "1.2.3", + "essentialcontacts": "1.7.3", + "eventarc": "1.15.1", + "filestore": "1.9.3", + "functions": "1.19.3", + "gkebackup": "1.6.3", + "gkeconnect": "0.12.1", + "gkehub": "0.15.3", + "gkemulticloud": "1.5.1", + "grafeas": "0.3.13", + "gsuiteaddons": "1.7.3", + "iam": "1.3.1", + "iap": "1.10.3", + "identitytoolkit": "0.2.3", + "ids": "1.5.3", + "iot": "1.8.3", + "kms": "1.20.5", + "language": "1.14.3", + "lifesciences": "0.10.3", + "longrunning": "0.6.4", + "managedidentities": "1.7.3", + "managedkafka": "0.4.0", + "maps": "1.17.1", + "mediatranslation": "0.9.3", + "memcache": "1.11.3", + "memorystore": "0.1.1", + "metastore": "1.14.3", + "migrationcenter": "1.1.3", + "monitoring": "1.23.0", + "netapp": "1.6.0", + "networkconnectivity": "1.16.1", + "networkmanagement": "1.18.0", + "networksecurity": "0.10.3", + "networkservices": "0.2.3", + "notebooks": "1.12.3", + "optimization": "1.7.3", + "oracledatabase": "0.2.0", + "orchestration": "1.11.4", + "orgpolicy": "1.14.2", + "osconfig": "1.14.3", + "oslogin": "1.14.3", + "parallelstore": "0.9.2", + "parametermanager": "0.0.0", + "phishingprotection": "0.9.3", + "policysimulator": "0.3.3", + "policytroubleshooter": "1.11.3", + "privatecatalog": "0.10.4", + "privilegedaccessmanager": "0.2.3", + "rapidmigrationassessment": "1.1.3", + "recaptchaenterprise": "2.19.4", + "recommendationengine": "0.9.3", + "recommender": "1.13.3", + "redis": "1.17.3", + "resourcemanager": "1.10.3", + "resourcesettings": "1.8.3", + "retail": "1.19.2", + "run": "1.8.1", + "scheduler": "1.11.3", + "secretmanager": "1.14.3", + "securesourcemanager": "1.3.1", + "security": "1.18.3", + "securitycenter": "1.35.3", + "securitycentermanagement": "1.1.3", + "securityposture": "0.2.3", + "servicecontrol": "1.14.3", + "servicedirectory": "1.12.3", + "servicehealth": "1.2.1", + "servicemanagement": "1.10.3", + "serviceusage": "1.9.3", + "shell": "1.8.3", + "shopping": "0.16.0", + "speech": "1.26.0", + "storageinsights": "1.1.3", + "storagetransfer": "1.12.1", + "streetview": "0.2.3", + "support": "1.1.3", + "talent": "1.8.0", + "telcoautomation": "1.1.3", + "texttospeech": "1.11.0", + "tpu": "1.8.0", + "trace": "1.11.3", + "translate": "1.12.3", + "video": "1.23.3", + "videointelligence": "1.12.3", + "vision": "2.9.3", + "visionai": "0.4.3", + "vmmigration": "1.8.3", + "vmwareengine": "1.3.3", + "vpcaccess": "1.8.3", + "webrisk": "1.10.3", + "websecurityscanner": "1.7.3", + "workflows": "1.13.3", + "workstations": "1.1.3" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 87c6277740c6f..fcbe01199ba23 100644 --- 
a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.118.0" + ".": "0.118.1" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 74c920023e3bd..8bb3f7ce4cbf9 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,13 @@ # Changes + +## [0.118.1](https://github.com/googleapis/google-cloud-go/compare/v0.118.0...v0.118.1) (2025-01-30) + + +### Bug Fixes + +* **main:** Remove OpenCensus dependency ([6243d91](https://github.com/googleapis/google-cloud-go/commit/6243d910b2bb502211d8308f9cc7723829d9f844)) + ## [0.118.0](https://github.com/googleapis/google-cloud-go/compare/v0.117.0...v0.118.0) (2025-01-02) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 7a6d74af1a3ff..f6f18c8b5736c 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -20,16 +20,12 @@ For an updated list of all of our released APIs please see our ## [Go Versions Supported](#supported-versions) -**Note:** As of Jan 1, 2025 the Cloud Client Libraries for Go will support the -two most-recent major Go releases -- the same [policy](https://go.dev/doc/devel/release#policy) -the Go programming language follows. +Our libraries are compatible with the two most recent major Go +releases, the same [policy](https://go.dev/doc/devel/release#policy) the Go +programming language follows. This means the currently supported versions are: -Our libraries are compatible with at least the three most recent, major Go -releases. They are currently compatible with: - -- Go 1.23 - Go 1.22 -- Go 1.21 +- Go 1.23 ## Authorization diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 0c23edb915fa5..b1a50e873881f 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -39,6 +39,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/ai/generativelanguage/apiv1alpha": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1alpha", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/ai/generativelanguage/apiv1beta": { "api_shortname": "generativelanguage", "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta", @@ -779,6 +789,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/cloudquotas/apiv1beta": { + "api_shortname": "cloudquotas", + "distribution_name": "cloud.google.com/go/cloudquotas/apiv1beta", + "description": "Cloud Quotas API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/cloudtasks/apiv2": { "api_shortname": "cloudtasks", "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", @@ -1402,7 +1422,7 @@ "cloud.google.com/go/gsuiteaddons/apiv1": { "api_shortname": 
"gsuiteaddons", "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", - "description": "Google Workspace Add-ons API", + "description": "Google Workspace add-ons API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", @@ -2019,6 +2039,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/parametermanager/apiv1": { + "api_shortname": "parametermanager", + "distribution_name": "cloud.google.com/go/parametermanager/apiv1", + "description": "Parameter Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parametermanager/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "api_shortname": "phishingprotection", "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index b0f74906d9451..4eabbed79fe6f 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.46.0...pubsub/v1.47.0) (2025-01-31) + + +### Features + +* **pubsub:** Support new forms of topic ingestion ([#11537](https://github.com/googleapis/google-cloud-go/issues/11537)) ([46d6ed4](https://github.com/googleapis/google-cloud-go/commit/46d6ed475e6ae6b96f3e11e17496fd75fd8ea7c4)) + ## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.3...pubsub/v1.46.0) (2025-01-24) diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go index fc6b11e22267c..f2754293858ec 100644 --- a/vendor/cloud.google.com/go/pubsub/internal/version.go +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.46.0" +const Version = "1.47.0" diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index eb46c318282da..393ca0638f34c 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -592,6 +592,189 @@ func (i *IngestionDataSourceCloudStoragePubSubAvroFormat) isCloudStorageIngestio return true } +// EventHubsState denotes the possible states for ingestion from Event Hubs. +type EventHubsState int + +const ( + // EventHubsStateUnspecified is the default value. This value is unused. + EventHubsStateUnspecified = iota + + // EventHubsStateActive means the state is active. + EventHubsStateActive + + // EventHubsStatePermissionDenied indicates encountered permission denied error + // while consuming data from Event Hubs. + // This can happen when `client_id`, or `tenant_id` are invalid. Or the + // right permissions haven't been granted. + EventHubsStatePermissionDenied + + // EventHubsStatePublishPermissionDenied indicates permission denied encountered + // while publishing to the topic. + EventHubsStatePublishPermissionDenied + + // EventHubsStateNamespaceNotFound indicates the provided Event Hubs namespace couldn't be found. + EventHubsStateNamespaceNotFound + + // EventHubsStateNotFound indicates the provided Event Hub couldn't be found. 
+	EventHubsStateNotFound
+
+	// EventHubsStateSubscriptionNotFound indicates the provided Event Hubs subscription couldn't be found.
+	EventHubsStateSubscriptionNotFound
+
+	// EventHubsStateResourceGroupNotFound indicates the provided Event Hubs resource group couldn't be found.
+	EventHubsStateResourceGroupNotFound
+)
+
+// IngestionDataSourceAzureEventHubs are ingestion settings for Azure Event Hubs.
+type IngestionDataSourceAzureEventHubs struct {
+	// Output only field that indicates the state of the Event Hubs ingestion source.
+	State EventHubsState
+
+	// Name of the resource group within the Azure subscription.
+	ResourceGroup string
+
+	// Name of the Event Hubs namespace.
+	Namespace string
+
+	// Name of the Event Hub.
+	EventHub string
+
+	// Client ID of the Azure application that is being used to authenticate Pub/Sub.
+	ClientID string
+
+	// Tenant ID of the Azure application that is being used to authenticate Pub/Sub.
+	TenantID string
+
+	// The Azure subscription ID.
+	SubscriptionID string
+
+	// GCPServiceAccount is the GCP service account to be used for Federated Identity
+	// authentication.
+	GCPServiceAccount string
+}
+
+var _ IngestionDataSource = (*IngestionDataSourceAzureEventHubs)(nil)
+
+func (i *IngestionDataSourceAzureEventHubs) isIngestionDataSource() bool {
+	return true
+}
+
+// AmazonMSKState denotes the possible states for ingestion from Amazon MSK.
+type AmazonMSKState int
+
+const (
+	// AmazonMSKStateUnspecified is the default value. This value is unused.
+	AmazonMSKStateUnspecified = iota
+
+	// AmazonMSKActive indicates the MSK topic is active.
+	AmazonMSKActive
+
+	// AmazonMSKPermissionDenied indicates a permission-denied error was encountered while consuming data from Amazon MSK.
+	AmazonMSKPermissionDenied
+
+	// AmazonMSKPublishPermissionDenied indicates a permission-denied error was encountered while publishing to the topic.
+	AmazonMSKPublishPermissionDenied
+
+	// AmazonMSKClusterNotFound indicates the provided MSK cluster wasn't found.
+	AmazonMSKClusterNotFound
+
+	// AmazonMSKTopicNotFound indicates the provided topic wasn't found.
+	AmazonMSKTopicNotFound
+)
+
+// IngestionDataSourceAmazonMSK are ingestion settings for Amazon MSK.
+type IngestionDataSourceAmazonMSK struct {
+	// An output-only field that indicates the state of the Amazon
+	// MSK ingestion source.
+	State AmazonMSKState
+
+	// The Amazon Resource Name (ARN) that uniquely identifies the
+	// cluster.
+	ClusterARN string
+
+	// The name of the topic in the Amazon MSK cluster that Pub/Sub
+	// will import from.
+	Topic string
+
+	// AWS role ARN to be used for Federated Identity authentication
+	// with Amazon MSK. Check the Pub/Sub docs for how to set up this role and
+	// the required permissions that need to be attached to it.
+	AWSRoleARN string
+
+	// The GCP service account to be used for Federated Identity
+	// authentication with Amazon MSK (via an `AssumeRoleWithWebIdentity` call
+	// for the provided role). The `aws_role_arn` must be set up with
+	// `accounts.google.com:sub` equal to this service account number.
+	GCPServiceAccount string
+}
+
+var _ IngestionDataSource = (*IngestionDataSourceAmazonMSK)(nil)
+
+func (i *IngestionDataSourceAmazonMSK) isIngestionDataSource() bool {
+	return true
+}
+
+// ConfluentCloudState denotes the possible states for ingestion from Confluent Cloud.
+type ConfluentCloudState int
+
+const (
+	// ConfluentCloudStateUnspecified is the default value. This value is unused.
+	ConfluentCloudStateUnspecified = iota
+
+	// ConfluentCloudActive indicates the state is active.
+	ConfluentCloudActive = 1
+
+	// ConfluentCloudPermissionDenied indicates a permission-denied error was encountered
+	// while consuming data from Confluent Cloud.
+	ConfluentCloudPermissionDenied = 2
+
+	// ConfluentCloudPublishPermissionDenied indicates a permission-denied error was encountered
+	// while publishing to the topic.
+	ConfluentCloudPublishPermissionDenied = 3
+
+	// ConfluentCloudUnreachableBootstrapServer indicates the provided bootstrap
+	// server address is unreachable.
+	ConfluentCloudUnreachableBootstrapServer = 4
+
+	// ConfluentCloudClusterNotFound indicates the provided cluster wasn't found.
+	ConfluentCloudClusterNotFound = 5
+
+	// ConfluentCloudTopicNotFound indicates the provided topic wasn't found.
+	ConfluentCloudTopicNotFound = 6
+)
+
+// IngestionDataSourceConfluentCloud are ingestion settings for Confluent Cloud.
+type IngestionDataSourceConfluentCloud struct {
+	// An output-only field that indicates the state of the
+	// Confluent Cloud ingestion source.
+	State ConfluentCloudState
+
+	// The address of the bootstrap server. The format is url:port.
+	BootstrapServer string
+
+	// The ID of the cluster.
+	ClusterID string
+
+	// The name of the topic in the Confluent Cloud cluster that
+	// Pub/Sub will import from.
+	Topic string
+
+	// The ID of the identity pool to be used for Federated Identity
+	// authentication with Confluent Cloud. See
+	// https://docs.confluent.io/cloud/current/security/authenticate/workload-identities/identity-providers/oauth/identity-pools.html#add-oauth-identity-pools.
+	IdentityPoolID string
+
+	// The GCP service account to be used for Federated Identity
+	// authentication with `identity_pool_id`.
+	GCPServiceAccount string
+}
+
+var _ IngestionDataSource = (*IngestionDataSourceConfluentCloud)(nil)
+
+func (i *IngestionDataSourceConfluentCloud) isIngestionDataSource() bool {
+	return true
+}
+
 func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *IngestionDataSourceSettings {
 	if pbs == nil {
 		return nil
@@ -625,6 +808,34 @@ func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *In
 			MinimumObjectCreateTime: cs.GetMinimumObjectCreateTime().AsTime(),
 			MatchGlob:               cs.GetMatchGlob(),
 		}
+	} else if e := pbs.GetAzureEventHubs(); e != nil {
+		s.Source = &IngestionDataSourceAzureEventHubs{
+			State:             EventHubsState(e.GetState()),
+			ResourceGroup:     e.GetResourceGroup(),
+			Namespace:         e.GetNamespace(),
+			EventHub:          e.GetEventHub(),
+			ClientID:          e.GetClientId(),
+			TenantID:          e.GetTenantId(),
+			SubscriptionID:    e.GetSubscriptionId(),
+			GCPServiceAccount: e.GetGcpServiceAccount(),
+		}
+	} else if m := pbs.GetAwsMsk(); m != nil {
+		s.Source = &IngestionDataSourceAmazonMSK{
+			State:             AmazonMSKState(m.GetState()),
+			ClusterARN:        m.GetClusterArn(),
+			Topic:             m.GetTopic(),
+			AWSRoleARN:        m.GetAwsRoleArn(),
+			GCPServiceAccount: m.GetGcpServiceAccount(),
+		}
+	} else if c := pbs.GetConfluentCloud(); c != nil {
+		s.Source = &IngestionDataSourceConfluentCloud{
+			State:             ConfluentCloudState(c.GetState()),
+			BootstrapServer:   c.GetBootstrapServer(),
+			ClusterID:         c.GetClusterId(),
+			Topic:             c.GetTopic(),
+			IdentityPoolID:    c.GetIdentityPoolId(),
+			GCPServiceAccount: c.GetGcpServiceAccount(),
+		}
 	}
 
 	if pbs.PlatformLogsSettings != nil {
@@ -681,7 +892,6 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings
 	case *IngestionDataSourceCloudStorageAvroFormat:
 		pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{
 			CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{
-				State:
pb.IngestionDataSourceSettings_CloudStorage_State(cs.State), Bucket: cs.Bucket, InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_{ AvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat{}, @@ -704,6 +914,40 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings } } } + if e, ok := out.(*IngestionDataSourceAzureEventHubs); ok { + pbs.Source = &pb.IngestionDataSourceSettings_AzureEventHubs_{ + AzureEventHubs: &pb.IngestionDataSourceSettings_AzureEventHubs{ + ResourceGroup: e.ResourceGroup, + Namespace: e.Namespace, + EventHub: e.EventHub, + ClientId: e.ClientID, + TenantId: e.TenantID, + SubscriptionId: e.SubscriptionID, + GcpServiceAccount: e.GCPServiceAccount, + }, + } + } + if m, ok := out.(*IngestionDataSourceAmazonMSK); ok { + pbs.Source = &pb.IngestionDataSourceSettings_AwsMsk_{ + AwsMsk: &pb.IngestionDataSourceSettings_AwsMsk{ + ClusterArn: m.ClusterARN, + Topic: m.Topic, + AwsRoleArn: m.AWSRoleARN, + GcpServiceAccount: m.GCPServiceAccount, + }, + } + } + if c, ok := out.(*IngestionDataSourceConfluentCloud); ok { + pbs.Source = &pb.IngestionDataSourceSettings_ConfluentCloud_{ + ConfluentCloud: &pb.IngestionDataSourceSettings_ConfluentCloud{ + BootstrapServer: c.BootstrapServer, + ClusterId: c.ClusterID, + Topic: c.Topic, + IdentityPoolId: c.IdentityPoolID, + GcpServiceAccount: c.GCPServiceAccount, + }, + } + } } return pbs } diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index f2029f249bb50..a0c4aacc86b6a 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -12,6 +12,12 @@ "advisorynotifications": { "component": "advisorynotifications" }, + "ai": { + "component": "ai" + }, + "aiplatform": { + "component": "aiplatform" + }, "alloydb": { "component": "alloydb" }, @@ -303,6 +309,9 @@ "parallelstore": { "component": "parallelstore" }, + "parametermanager": { + "component": "parametermanager" + }, "phishingprotection": { "component": "phishingprotection" }, diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go index e5cb46be4557c..d229d842b99c3 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -50,9 +50,11 @@ func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe var err int for tries = 0; tries < 20; tries++ { - err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg))) + // inlined this call because it ran out of stack when inlining was disabled + err = int(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(pfn), uintptr(unsafe.Pointer(arg)), 0)) if err == 0 { - pthread_detach(*thread) + // inlined this call because it ran out of stack when inlining was disabled + call5(pthread_detachABI0, uintptr(*thread), 0, 0, 0, 0) return 0 } if err != int(syscall.EAGAIN) { @@ -60,7 +62,8 @@ func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe } ts.Sec = 0 ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. 
- nanosleep(&ts, nil) + // inlined this call because it ran out of stack when inlining was disabled + call5(nanosleepABI0, uintptr(unsafe.Pointer(&ts)), 0, 0, 0, 0) } return int(syscall.EAGAIN) } diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go index 74626c64a0e9a..38f94419397d8 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -6,7 +6,11 @@ package fakecgo type ( - size_t uintptr + size_t uintptr + // Sources: + // Darwin (32 bytes) - https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/_types.h#L74 + // FreeBSD (32 bytes) - https://github.com/DoctorWkt/xv6-freebsd/blob/d2a294c2a984baed27676068b15ed9a29b06ab6f/include/signal.h#L98C9-L98C21 + // Linux (128 bytes) - https://github.com/torvalds/linux/blob/ab75170520d4964f3acf8bb1f91d34cbc650688e/arch/x86/include/asm/signal.h#L25 sigset_t [128]byte pthread_attr_t [64]byte pthread_t int diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go index 3d19fd822a73e..7a3a1bbb4714a 100644 --- a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -18,84 +18,104 @@ func setg_trampoline(setg uintptr, G uintptr) // call5 takes fn the C function and 5 arguments and calls the function with those arguments func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr +//go:nosplit func malloc(size uintptr) unsafe.Pointer { ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) } +//go:nosplit func free(ptr unsafe.Pointer) { call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) } +//go:nosplit func setenv(name *byte, value *byte, overwrite int32) int32 { return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) } +//go:nosplit func unsetenv(name *byte) int32 { return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) } +//go:nosplit func sigfillset(set *sigset_t) int32 { return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) } +//go:nosplit func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) } +//go:nosplit func abort() { call5(abortABI0, 0, 0, 0, 0, 0) } +//go:nosplit func pthread_attr_init(attr *pthread_attr_t) int32 { return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) } +//go:nosplit func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) } +//go:nosplit func pthread_detach(thread pthread_t) int32 { return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) } +//go:nosplit func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) } +//go:nosplit func pthread_self() pthread_t { return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) } +//go:nosplit func 
pthread_get_stacksize_np(thread pthread_t) size_t {
 	return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 {
 	return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 {
 	return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_attr_destroy(attr *pthread_attr_t) int32 {
 	return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_mutex_lock(mutex *pthread_mutex_t) int32 {
 	return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 {
 	return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_cond_broadcast(cond *pthread_cond_t) int32 {
 	return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0))
 }
 
+//go:nosplit
 func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 {
 	return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0))
 }
diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go
index 6a34406661fc9..50e0441ca5bf9 100644
--- a/vendor/github.com/thanos-io/objstore/inmem.go
+++ b/vendor/github.com/thanos-io/objstore/inmem.go
@@ -193,6 +193,32 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64
 	}, nil
 }
 
+func (b *InMemBucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	reader, err := b.Get(ctx, name)
+	if err != nil && !errors.Is(err, errNotFound) {
+		return err
+	}
+	b.mtx.Lock()
+	defer b.mtx.Unlock()
+
+	if reader == nil {
+		reader = io.NopCloser(bytes.NewReader(nil))
+	}
+
+	new, err := f(reader)
+	if err != nil {
+		return err
+	}
+
+	newObj, err := io.ReadAll(new)
+	if err != nil {
+		return err
+	}
+
+	b.objects[name] = newObj
+	return nil
+}
+
 // Exists checks if the given directory exists in memory.
 func (b *InMemBucket) Exists(_ context.Context, name string) (bool, error) {
 	b.mtx.RLock()
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go
index 86ecfa268148b..77540d2817b91 100644
--- a/vendor/github.com/thanos-io/objstore/objstore.go
+++ b/vendor/github.com/thanos-io/objstore/objstore.go
@@ -64,6 +64,10 @@ type Bucket interface {
 	// Upload should be idempotent.
 	Upload(ctx context.Context, name string, r io.Reader) error
 
+	// GetAndReplace replaces an existing object with new content produced by f.
+	// If the previous object is created or updated before the new object is uploaded, the call will fail with an error.
+	GetAndReplace(ctx context.Context, name string, f func(existing io.Reader) (io.Reader, error)) error
+
 	// Delete removes the object with the given name.
 	// If object does not exist in the moment of deletion, Delete should throw error.
Delete(ctx context.Context, name string) error
@@ -731,6 +735,10 @@ func (b *metricBucket) GetRange(ctx context.Context, name string, off, length in
 	), nil
 }
 
+func (b *metricBucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	return b.bkt.GetAndReplace(ctx, name, f)
+}
+
 func (b *metricBucket) Exists(ctx context.Context, name string) (bool, error) {
 	const op = OpExists
 	b.metrics.ops.WithLabelValues(op).Inc()
diff --git a/vendor/github.com/thanos-io/objstore/prefixed_bucket.go b/vendor/github.com/thanos-io/objstore/prefixed_bucket.go
index a37450ca8707a..34830bf376f3b 100644
--- a/vendor/github.com/thanos-io/objstore/prefixed_bucket.go
+++ b/vendor/github.com/thanos-io/objstore/prefixed_bucket.go
@@ -79,6 +79,10 @@ func (p *PrefixedBucket) GetRange(ctx context.Context, name string, off int64, l
 	return p.bkt.GetRange(ctx, conditionalPrefix(p.prefix, name), off, length)
 }
 
+func (b *PrefixedBucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	return b.bkt.GetAndReplace(ctx, conditionalPrefix(b.prefix, name), f)
+}
+
 // Exists checks if the given object exists in the bucket.
 func (p *PrefixedBucket) Exists(ctx context.Context, name string) (bool, error) {
 	return p.bkt.Exists(ctx, conditionalPrefix(p.prefix, name))
diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
index 05fbdb55c0cfa..45537290927cc 100644
--- a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
+++ b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
@@ -429,3 +429,7 @@ func NewTestBucket(t testing.TB, component string) (objstore.Bucket, func(), err
 func (b *Bucket) Close() error {
 	return nil
 }
+
+func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	panic("unimplemented: Azure.GetAndReplace")
+}
diff --git a/vendor/github.com/thanos-io/objstore/providers/bos/bos.go b/vendor/github.com/thanos-io/objstore/providers/bos/bos.go
index 0cc4352cccd2d..16b01ee10d033 100644
--- a/vendor/github.com/thanos-io/objstore/providers/bos/bos.go
+++ b/vendor/github.com/thanos-io/objstore/providers/bos/bos.go
@@ -440,3 +440,7 @@ func validateForTest(conf Config) error {
 	}
 	return nil
 }
+
+func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	panic("unimplemented: BOS.GetAndReplace")
+}
diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
index df602877be815..920f3ace74715 100644
--- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
+++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
@@ -11,6 +11,7 @@ import (
 	"path/filepath"
 
 	"github.com/efficientgo/core/errcapture"
+	"github.com/gofrs/flock"
 	"github.com/pkg/errors"
 	"gopkg.in/yaml.v2"
 
@@ -269,6 +270,41 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) (err erro
 	return nil
 }
 
+func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	file := filepath.Join(b.rootDir, name)
+
+	// Acquire a file lock before modifying, as file systems don't support conditional writes like cloud providers.
+	fileLock := flock.New(file + ".lock")
+	locked, err := fileLock.TryLock()
+	if err != nil {
+		return err
+	}
+	if !locked {
+		return errors.New("file is locked by another process")
+	}
+	defer fileLock.Unlock()
+
+	var r io.ReadCloser
+	r, err = os.Open(file)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	} else if err == nil {
+		defer r.Close()
+	}
+
+	newContent, err := f(r)
+	if err != nil {
+		return err
+	}
+
+	content, err := io.ReadAll(newContent)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(file, content, 0600)
+}
+
 func isDirEmpty(name string) (ok bool, err error) {
 	f, err := os.Open(filepath.Clean(name))
 	if os.IsNotExist(err) {
diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
index b89f8735bc51e..cd9105ca31c1b 100644
--- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
+++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
@@ -5,6 +5,7 @@
 package gcs
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"io"
@@ -37,6 +38,8 @@ var DefaultConfig = Config{
 	HTTPConfig: exthttp.DefaultHTTPConfig,
 }
 
+var _ objstore.Bucket = &Bucket{}
+
 // Config stores the configuration for gcs bucket.
 type Config struct {
 	Bucket         string `yaml:"bucket"`
@@ -273,7 +276,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt
 
 // Get returns a reader for the given object name.
 func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
-	r, err := b.bkt.Object(name).NewReader(ctx)
+	r, err := b.get(ctx, name)
 	if err != nil {
 		return r, err
 	}
@@ -286,6 +289,10 @@ func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
 	}, nil
 }
 
+func (b *Bucket) get(ctx context.Context, name string) (*storage.Reader, error) {
+	return b.bkt.Object(name).NewReader(ctx)
+}
+
 // GetRange returns a new range reader for the given object name and range.
 func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
 	r, err := b.bkt.Object(name).NewRangeReader(ctx, off, length)
@@ -333,7 +340,21 @@ func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
 
 // Upload writes the file specified in src to remote GCS location specified as target.
 func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
-	w := b.bkt.Object(name).NewWriter(ctx)
+	return b.upload(ctx, name, r, 0, false)
+}
+
+// upload writes the contents of r to the target GCS object, optionally guarded by a generation match or a does-not-exist precondition.
+func (b *Bucket) upload(ctx context.Context, name string, r io.Reader, generation int64, requireNewObject bool) error {
+	o := b.bkt.Object(name)
+
+	var w *storage.Writer
+	if generation != 0 {
+		o = o.If(storage.Conditions{GenerationMatch: generation})
+	}
+	if requireNewObject {
+		o = o.If(storage.Conditions{DoesNotExist: true})
+	}
+	w = o.NewWriter(ctx)
 
 	// if `chunkSize` is 0, we don't set any custom value for writer's ChunkSize.
// It uses whatever the default value https://pkg.go.dev/google.golang.org/cloud/storage#Writer @@ -347,6 +368,41 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { return w.Close() } +func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error { + var mustNotExist bool + var generation int64 + + // Get the current object + storageReader, err := b.get(ctx, name) + if err != nil && !errors.Is(err, storage.ErrObjectNotExist) { + return err + } else if errors.Is(err, storage.ErrObjectNotExist) { + mustNotExist = true + } + + // If object exists, ensure we close the reader when done + if storageReader != nil { + generation = storageReader.Attrs.Generation + defer storageReader.Close() + } + + newContent, err := f(wrapReader(storageReader)) + if err != nil { + return err + } + + // Upload with the previous generation, or mustNotExist for new objects + return b.upload(ctx, name, newContent, generation, mustNotExist) +} + +func wrapReader(r *storage.Reader) io.ReadCloser { + if r == nil { + return io.NopCloser(bytes.NewReader(nil)) + } + + return r +} + // Delete removes the object with the given name. func (b *Bucket) Delete(ctx context.Context, name string) error { return b.bkt.Object(name).Delete(ctx) diff --git a/vendor/github.com/thanos-io/objstore/providers/oss/oss.go b/vendor/github.com/thanos-io/objstore/providers/oss/oss.go index 2a6cb219ad409..761ed174db609 100644 --- a/vendor/github.com/thanos-io/objstore/providers/oss/oss.go +++ b/vendor/github.com/thanos-io/objstore/providers/oss/oss.go @@ -426,3 +426,7 @@ func (b *Bucket) IsAccessDeniedErr(err error) bool { } return false } + +func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error { + panic("unimplemented: OSS.GetAndReplace") +} diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index cda78838ec491..5fbed6464c5c3 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -458,7 +458,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt }, filteredOpts...) } -func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { +func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (*minio.Object, error) { sse, err := b.getServerSideEncryption(ctx) if err != nil { return nil, err @@ -488,6 +488,16 @@ func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) ( return nil, err } + return r, nil +} + +// Get returns a reader for the given object name. +func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + r, err := b.getRange(ctx, name, 0, -1) + if err != nil { + return r, err + } + return objstore.ObjectSizerReadCloser{ ReadCloser: r, Size: func() (int64, error) { @@ -501,14 +511,24 @@ func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) ( }, nil } -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - // GetRange returns a new range reader for the given object name and range. 
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
-	return b.getRange(ctx, name, off, length)
+	r, err := b.getRange(ctx, name, off, length)
+	if err != nil {
+		return r, err
+	}
+
+	return objstore.ObjectSizerReadCloser{
+		ReadCloser: r,
+		Size: func() (int64, error) {
+			stat, err := r.Stat()
+			if err != nil {
+				return 0, err
+			}
+
+			return stat.Size, nil
+		},
+	}, nil
 }
 
 // Exists checks if the given object exists.
@@ -526,6 +546,10 @@ func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
 
 // Upload the contents of the reader as an object into the bucket.
 func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
+	return b.upload(ctx, name, r, "", false)
+}
+
+func (b *Bucket) upload(ctx context.Context, name string, r io.Reader, etag string, requireNewObject bool) error {
 	sse, err := b.getServerSideEncryption(ctx)
 	if err != nil {
 		return err
@@ -549,24 +573,33 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
 		userMetadata[k] = v
 	}
 
+	putOpts := minio.PutObjectOptions{
+		DisableMultipart:     b.disableMultipart,
+		PartSize:             partSize,
+		ServerSideEncryption: sse,
+		UserMetadata:         userMetadata,
+		StorageClass:         b.storageClass,
+		SendContentMd5:       b.sendContentMd5,
+		// 4 is what minio-go have as the default. To be certain we do micro benchmark before any changes we
+		// ensure we pin this number to four.
+		// TODO(bwplotka): Consider adjusting this number to GOMAXPROCS or to expose this in config if it becomes bottleneck.
+		NumThreads: 4,
+	}
+	if etag != "" {
+		if requireNewObject {
+			putOpts.SetMatchETagExcept(etag)
+		} else {
+			putOpts.SetMatchETag(etag)
+		}
+	}
+
 	if _, err := b.client.PutObject(
 		ctx,
 		b.name,
 		name,
 		r,
 		size,
-		minio.PutObjectOptions{
-			DisableMultipart:     b.disableMultipart,
-			PartSize:             partSize,
-			ServerSideEncryption: sse,
-			UserMetadata:         userMetadata,
-			StorageClass:         b.storageClass,
-			SendContentMd5:       b.sendContentMd5,
-			// 4 is what minio-go have as the default. To be certain we do micro benchmark before any changes we
-			// ensure we pin this number to four.
-			// TODO(bwplotka): Consider adjusting this number to GOMAXPROCS or to expose this in config if it becomes bottleneck.
-			NumThreads: 4,
-		},
+		putOpts,
	); err != nil {
 		return errors.Wrap(err, "upload s3 object")
 	}
@@ -574,6 +607,30 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
 	return nil
 }
 
+// GetAndReplace reads the current object, passes it to f, and uploads the result conditionally on the original ETag.
+func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
+	var requireNewObject bool
+	originalContent, err := b.getRange(ctx, name, 0, -1)
+	if err != nil && !b.IsObjNotFoundErr(err) {
+		return err
+	} else if b.IsObjNotFoundErr(err) {
+		requireNewObject = true
+	}
+
+	// Call the work function to get a new version of the file.
+	newContent, err := f(originalContent)
+	if err != nil {
+		return err
+	}
+
+	stats, err := originalContent.Stat()
+	if err != nil {
+		return err
+	}
+
+	return b.upload(ctx, name, newContent, stats.ETag, requireNewObject)
+}
+
 // Attributes returns information about the specified object.
func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { objInfo, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{}) diff --git a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go index 19eb0d454556b..aedb7b792369c 100644 --- a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go +++ b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go @@ -375,6 +375,10 @@ func (c *Container) Upload(_ context.Context, name string, r io.Reader) (err err return nil } +func (b *Container) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error { + panic("unimplemented: Swift.GetAndReplace") +} + // Delete removes the object with the given name. func (c *Container) Delete(_ context.Context, name string) error { return errors.Wrap(c.connection.LargeObjectDelete(c.name, name), "delete object") diff --git a/vendor/github.com/thanos-io/objstore/testing.go b/vendor/github.com/thanos-io/objstore/testing.go index 80f1e198e0cb8..6f049c7f5433e 100644 --- a/vendor/github.com/thanos-io/objstore/testing.go +++ b/vendor/github.com/thanos-io/objstore/testing.go @@ -287,6 +287,10 @@ func (d *delayingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e return d.bkt.Get(ctx, name) } +func (b *delayingBucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error { + panic("unimplemented: delayingBucket.GetAndReplace") +} + func (d *delayingBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { time.Sleep(d.delay) return d.bkt.Attributes(ctx, name) diff --git a/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go b/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go index 58bdea0776c59..ca3d5a7a55b9b 100644 --- a/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go +++ b/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go @@ -118,6 +118,14 @@ func (t TracingBucket) Upload(ctx context.Context, name string, r io.Reader) (er return } +func (t TracingBucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) (err error) { + doWithSpan(ctx, "bucket_get_and_replace", func(spanCtx context.Context, span opentracing.Span) { + span.LogKV("name", name) + err = t.bkt.GetAndReplace(spanCtx, name, f) + }) + return +} + func (t TracingBucket) Delete(ctx context.Context, name string) (err error) { doWithSpan(ctx, "bucket_delete", func(spanCtx context.Context, span opentracing.Span) { span.LogKV("name", name) diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go index 666f86f43f649..037213a0cafb9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -13,13 +13,13 @@ type Timestamp uint64 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time. func NewTimestampFromTime(t time.Time) Timestamp { - // nolint:gosec + //nolint:gosec return Timestamp(uint64(t.UnixNano())) } // AsTime converts this to a time.Time. 
func (ts Timestamp) AsTime() time.Time { - // nolint:gosec + //nolint:gosec return time.Unix(0, int64(ts)).UTC() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go index ad2e1c7ae476a..73a95bcf2e280 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -148,7 +148,7 @@ func (v Value) FromRaw(iv any) error { case int64: v.SetInt(tv) case uint: - // nolint:gosec + //nolint:gosec v.SetInt(int64(tv)) case uint8: v.SetInt(int64(tv)) @@ -157,7 +157,7 @@ func (v Value) FromRaw(iv any) error { case uint32: v.SetInt(int64(tv)) case uint64: - // nolint:gosec + //nolint:gosec v.SetInt(int64(tv)) case float32: v.SetDouble(float64(tv)) diff --git a/vendor/modules.txt b/vendor/modules.txt index ac55aeaa60afd..4f92f6e5f9ae5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,8 @@ # cel.dev/expr v0.19.1 ## explicit; go 1.21.1 cel.dev/expr -# cloud.google.com/go v0.118.0 -## explicit; go 1.21 +# cloud.google.com/go v0.118.1 +## explicit; go 1.22.7 cloud.google.com/go cloud.google.com/go/internal cloud.google.com/go/internal/detect @@ -55,7 +55,7 @@ cloud.google.com/go/longrunning/autogen/longrunningpb cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -# cloud.google.com/go/pubsub v1.46.0 +# cloud.google.com/go/pubsub v1.47.0 ## explicit; go 1.22.7 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 @@ -667,7 +667,7 @@ github.com/eapache/go-xerial-snappy # github.com/eapache/queue v1.1.0 ## explicit github.com/eapache/queue -# github.com/ebitengine/purego v0.8.1 +# github.com/ebitengine/purego v0.8.2 ## explicit; go 1.18 github.com/ebitengine/purego github.com/ebitengine/purego/internal/cgo @@ -1584,7 +1584,7 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 -# github.com/shirou/gopsutil/v4 v4.24.12 +# github.com/shirou/gopsutil/v4 v4.25.1 ## explicit; go 1.18 github.com/shirou/gopsutil/v4/common github.com/shirou/gopsutil/v4/cpu @@ -1633,7 +1633,7 @@ github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a +# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 ## explicit; go 1.22 github.com/thanos-io/objstore github.com/thanos-io/objstore/clientutil @@ -1782,7 +1782,7 @@ go.opencensus.io/tag ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/collector/pdata v1.24.0 +# go.opentelemetry.io/collector/pdata v1.25.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -2562,3 +2562,4 @@ sigs.k8s.io/yaml/goyaml.v2 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc # github.com/grafana/loki/pkg/push => ./pkg/push # github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0 +# github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866
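
A note on the doc-generator change above: an entry flagged Inline is rendered without its field name, so the generated configuration spec splices the root block's fields into the parent instead of nesting them under a key. A minimal, self-contained sketch of the branch writeConfigEntry now takes; renderRootRef and the sample names are illustrative, not part of the Loki source:

package main

import "fmt"

// renderRootRef mirrors the writeConfigEntry branch above: inline root blocks
// drop the "name:" prefix so their fields read as if defined in the parent.
func renderRootRef(name, blockName string, inline bool) string {
	if inline {
		return "[<" + blockName + ">]"
	}
	return "[" + name + ": <" + blockName + ">]"
}

func main() {
	// Named reference, the pre-existing behaviour:
	fmt.Println(renderRootRef("ruler_storage", "thanos_object_store_config", false))
	// Inline reference, the new behaviour for entries with Inline set:
	fmt.Println(renderRootRef("", "thanos_object_store_config", true))
}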
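
The pubsub additions above extend topic ingestion beyond the existing AWS Kinesis and Cloud Storage sources to Azure Event Hubs, Amazon MSK, and Confluent Cloud, each wired through IngestionDataSourceSettings.Source in both proto directions. A minimal sketch of creating an MSK-backed import topic with this version of the client; the project ID, ARNs, and service account below are placeholders:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Topic that continuously imports records from an Amazon MSK topic.
	// All identifiers below are placeholders.
	cfg := &pubsub.TopicConfig{
		IngestionDataSourceSettings: &pubsub.IngestionDataSourceSettings{
			Source: &pubsub.IngestionDataSourceAmazonMSK{
				ClusterARN:        "arn:aws:kafka:us-east-1:111111111111:cluster/example-cluster/uuid",
				Topic:             "events",
				AWSRoleARN:        "arn:aws:iam::111111111111:role/pubsub-msk-import",
				GCPServiceAccount: "msk-import@my-project.iam.gserviceaccount.com",
			},
		},
	}
	if _, err := client.CreateTopicWithConfig(ctx, "msk-import-topic", cfg); err != nil {
		log.Fatal(err)
	}
}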
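
The GetAndReplace method threaded through the vendored objstore fork above is a read-modify-write primitive: the callback receives the current object content (an empty or nil reader when the object does not exist) and returns the replacement, while each provider guards the write (a .lock file on the filesystem, a GenerationMatch condition on GCS, an ETag match on S3). A minimal sketch of a caller, assuming the in-memory bucket from inmem.go; the counter object and helper name are illustrative:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strconv"

	"github.com/thanos-io/objstore"
)

// incrementCounter bumps an integer stored in the object "counter" using the
// read-modify-write contract of GetAndReplace: read what exists, return the
// replacement content.
func incrementCounter(ctx context.Context, bkt objstore.Bucket) error {
	return bkt.GetAndReplace(ctx, "counter", func(existing io.Reader) (io.Reader, error) {
		n := 0
		if existing != nil { // the filesystem provider may hand us a nil reader for a missing object
			raw, err := io.ReadAll(existing)
			if err != nil {
				return nil, err
			}
			if len(raw) > 0 {
				if n, err = strconv.Atoi(string(raw)); err != nil {
					return nil, err
				}
			}
		}
		return bytes.NewReader([]byte(strconv.Itoa(n + 1))), nil
	})
}

func main() {
	ctx := context.Background()
	bkt := objstore.NewInMemBucket() // the InMemBucket implementation shown above
	for i := 0; i < 3; i++ {
		if err := incrementCounter(ctx, bkt); err != nil {
			fmt.Println("update failed:", err)
		}
	}
}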
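
Because those guards are optimistic, a concurrent writer surfaces as a precondition-failure error rather than a silent lost update. Callers that need to make progress under contention would retry the whole read-modify-write; a hedged sketch, where the isConflict predicate is a placeholder for provider-specific error matching (the fork shown here does not expose a typed conflict error):

package objstoreutil

import (
	"context"
	"io"

	"github.com/thanos-io/objstore"
)

// UpdateWithRetry re-runs GetAndReplace until its conditional write wins or the
// attempt budget runs out. isConflict is a placeholder predicate: a real caller
// would match the provider-specific precondition failure here (HTTP 412 from
// GCS, S3's PreconditionFailed code, or the filesystem lock error above).
func UpdateWithRetry(ctx context.Context, bkt objstore.Bucket, name string,
	mutate func(io.Reader) (io.Reader, error), isConflict func(error) bool) error {
	const maxAttempts = 5
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		err = bkt.GetAndReplace(ctx, name, mutate)
		if err == nil || !isConflict(err) {
			return err // success, or a non-conflict error to surface as-is
		}
		// Lost the race: another writer changed the object first. Loop to
		// re-read the fresh content and re-apply the mutation.
	}
	return err
}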