diff --git a/.github/renovate.json b/.github/renovate.json
index e0a417fddf132..d0011e7e0ab45 100644
--- a/.github/renovate.json
+++ b/.github/renovate.json
@@ -1,36 +1,53 @@
{
- "$schema": "https://docs.renovatebot.com/renovate-schema.json",
- "extends": [
- "config:base"
- ],
- "labels": ["dependencies"],
- "prHourlyLimit": 4,
- "baseBranches": ["main"],
- "packageRules": [
- {
- "matchBaseBranches": ["release-2.9.x","release-2.8.x"],
- "packagePatterns": ["*"],
- "enabled": false
- },
- {
- "matchFileNames": [ "operator/go.mod" ],
- "matchPackageNames": [
- "github.com/grafana/loki",
- "github.com/grafana/loki/operator/apis/loki"
- ],
- "enabled": false
- }
- ],
- "vulnerabilityAlerts": {
- "enabled": true,
- "addLabels": ["area/security"]
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "labels": [
+ "dependencies"
+ ],
+ "prHourlyLimit": 4,
+ "baseBranches": [
+ "main"
+ ],
+ "packageRules": [
+ {
+ "matchBaseBranches": [
+ "release-2.9.x",
+ "release-2.8.x"
+ ],
+ "enabled": false,
+ "matchPackageNames": [
+ "*"
+ ]
},
-
- "osvVulnerabilityAlerts": true,
- "prConcurrentLimit": 10,
- "rebaseWhen": "conflicted",
- "branchPrefix": "deps-update/",
- "postUpdateOptions": ["gomodTidy"],
- "semanticCommitType": "fix",
- "semanticCommitScope": "deps"
- }
+ {
+ "matchFileNames": [
+ "operator/go.mod"
+ ],
+ "matchPackageNames": [
+ "github.com/grafana/loki",
+ "github.com/grafana/loki/operator/api/loki"
+ ],
+ "enabled": false
+ }
+ ],
+ "digest": {
+ "enabled": false
+ },
+ "vulnerabilityAlerts": {
+ "enabled": true,
+ "addLabels": [
+ "area/security"
+ ]
+ },
+ "osvVulnerabilityAlerts": true,
+ "prConcurrentLimit": 10,
+ "rebaseWhen": "conflicted",
+ "branchPrefix": "deps-update/",
+ "postUpdateOptions": [
+ "gomodTidy"
+ ],
+ "semanticCommitType": "fix",
+ "semanticCommitScope": "deps"
+}
diff --git a/.github/workflows/helm-release.yaml b/.github/workflows/helm-release.yaml
index b0ee40c36568d..c303939fdf39c 100644
--- a/.github/workflows/helm-release.yaml
+++ b/.github/workflows/helm-release.yaml
@@ -1,7 +1,13 @@
name: helm-release
on:
- workflow_dispatch: # must be invoked manually
+ workflow_dispatch: # for manual testing
+ push:
+ branches:
+ - main
+ - k[0-9]+
+ paths:
+ - 'production/helm/loki/Chart.yaml'
jobs:
call-update-helm-repo:
diff --git a/.github/workflows/helm-tagged-release-pr.yaml b/.github/workflows/helm-tagged-release-pr.yaml
index 1a5e6bdeccff8..fb6ed43154977 100644
--- a/.github/workflows/helm-tagged-release-pr.yaml
+++ b/.github/workflows/helm-tagged-release-pr.yaml
@@ -1,15 +1,23 @@
-name: helm-weekly-release-pr
+name: Helm tagged release PR
on:
release:
types:
- released
+ workflow_dispatch: # for manual testing
+
jobs:
weekly-release-pr:
runs-on: ubuntu-latest
+ env:
+ RELEASE_VERSION: "${{ github.event.release.tag_name || 'test' }}"
+ BUILD_IN_CONTAINER: false
steps:
- uses: actions/checkout@v4
+ - uses: gabe565/setup-helm-docs-action@v1
+ with:
+ version: v1.11.2
- id: "get_github_app_token"
name: "get github app token"
@@ -21,13 +29,14 @@ jobs:
- name: Update/regenerate files
id: update
- run: bash .github/workflows/scripts/helm-tagged-release.sh ${{ github.event.release.tag_name }}
+ run: |
+ bash .github/workflows/scripts/helm-tagged-release.sh ${RELEASE_VERSION}
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
token: ${{ steps.get_github_app_token.outputs.token }}
- title: Release loki Helm chart ${{ steps.update.outputs.new_chart_version }}
+ title: "chore: release loki helm chart ${{ steps.update.outputs.new_chart_version }}"
body: Automated PR created by [helm-tagged-release-pr.yaml](https://github.com/grafana/loki/blob/main/.github/workflows/helm-tagged-release-pr.yaml)
commit-message: Update loki chart to ${{ steps.update.outputs.new_chart_version }}
branch: helm-chart-tagged-${{ steps.update.outputs.new_chart_version }}
diff --git a/.github/workflows/helm-weekly-release-pr.yaml b/.github/workflows/helm-weekly-release-pr.yaml
index 7ac88b7b95841..45f7d6e0288d5 100644
--- a/.github/workflows/helm-weekly-release-pr.yaml
+++ b/.github/workflows/helm-weekly-release-pr.yaml
@@ -1,17 +1,26 @@
-name: helm-weekly-release-pr
+name: Helm weekly release PR
on:
schedule:
- - cron: '0 10 * * 1-5' # 10 UTC on weekdays; if we miss published images one day, they should align the day after
+ - cron: '0 10 * * 2' # 10 UTC every Tuesday (since ks get cut on Monday)
workflow_dispatch: # for manual testing
+permissions:
+ contents: "read"
+ id-token: "write"
+ pull-requests: "write"
+
jobs:
weekly-release-pr:
runs-on: ubuntu-latest
+ env:
+ BUILD_IN_CONTAINER: false
steps:
- uses: actions/checkout@v4
- - uses: imjasonh/setup-crane@v0.4
+ - uses: gabe565/setup-helm-docs-action@v1
+ with:
+ version: v1.11.2
- id: "get_github_app_token"
name: "get github app token"
@@ -21,15 +30,37 @@ jobs:
owner: "${{ github.repository_owner }}"
private-key: "${{ secrets.APP_PRIVATE_KEY }}"
- - name: Update/regenerate files
+ - name: "Login to DockerHub (from vault)"
+ uses: "grafana/shared-workflows/actions/dockerhub-login@main"
+
+ - uses: imjasonh/setup-crane@v0.4
+
+ - name: Update/regenerate files for k release
+ id: update-k
+ run: |
+ bash .github/workflows/scripts/helm-weekly-release.sh -k
+
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v5
+ with:
+ token: ${{ steps.get_github_app_token.outputs.token }}
+ title: "chore: release loki helm chart ${{ steps.update-k.outputs.new_chart_version }}"
+ body: Automated PR created by [helm-weekly-release-pr.yaml](https://github.com/grafana/loki/blob/main/.github/workflows/helm-weekly-release-pr.yaml)
+ commit-message: Update loki chart to ${{ steps.update-k.outputs.new_chart_version }}
+ branch: helm-chart-weekly-${{ steps.update-k.outputs.new_chart_version }}
+ base: ${{ steps.update-k.outputs.weekly }}
+ labels: helm
+
+ - name: Update/regenerate files for standard release
id: update
- run: bash .github/workflows/scripts/helm-weekly-release.sh
+ run: |
+ bash .github/workflows/scripts/helm-weekly-release.sh
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
token: ${{ steps.get_github_app_token.outputs.token }}
- title: Release loki Helm chart ${{ steps.update.outputs.new_chart_version }}
+ title: "chore: release loki helm chart ${{ steps.update.outputs.new_chart_version }}"
body: Automated PR created by [helm-weekly-release-pr.yaml](https://github.com/grafana/loki/blob/main/.github/workflows/helm-weekly-release-pr.yaml)
commit-message: Update loki chart to ${{ steps.update.outputs.new_chart_version }}
branch: helm-chart-weekly-${{ steps.update.outputs.new_chart_version }}
diff --git a/.github/workflows/logql-analyzer.yml b/.github/workflows/logql-analyzer.yml
new file mode 100644
index 0000000000000..d78d90fb805a1
--- /dev/null
+++ b/.github/workflows/logql-analyzer.yml
@@ -0,0 +1,114 @@
+name: LogQL Analyzer
+
+on:
+ workflow_dispatch:
+ release:
+ types:
+ - released
+
+permissions:
+ contents: read
+ id-token: write
+
+jobs:
+ analyze:
+ runs-on: ubuntu-latest
+
+ env:
+ BUILD_TIMEOUT: 60
+ IMAGE_PREFIX: "grafana"
+ RELEASE_VERSION: "${{ github.event.release.tag_name || 'test' }}"
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-tags: true
+ path: loki
+
+ - name: prepare
+ id: prepare
+ env:
+        MAJOR_MINOR_VERSION_REGEXP: '([0-9]+\.[0-9]+)'
+        RELEASE_TAG_REGEXP: '^([0-9]+\.[0-9]+\.[0-9]+)$'
+ working-directory: loki
+ run: |
+ echo "$(./tools/image-tag)" > .tag
+ if [[ "$RELEASE_VERSION" == "test" ]]; then
+ echo "RELEASE_VERSION is not set, using image tag"
+ RELEASE_VERSION="$(cat .tag)"
+ fi
+ echo "RELEASE_VERSION: $RELEASE_VERSION"
+
+ # if the tag matches the pattern `D.D.D` then RELEASE_NAME="D-D-x", otherwise RELEASE_NAME="next"
+        RELEASE_NAME=$([[ $RELEASE_VERSION =~ $RELEASE_TAG_REGEXP ]] && echo $RELEASE_VERSION | grep -oE $MAJOR_MINOR_VERSION_REGEXP | sed "s/\\./-/g" | sed "s/$/-x/" || echo "next")
+ echo "RELEASE_NAME: $RELEASE_NAME"
+
+ echo "release_version=${RELEASE_VERSION}" >> "$GITHUB_OUTPUT"
+ echo "release_name=${RELEASE_NAME}" >> "$GITHUB_OUTPUT"
+
+ - id: "get-github-app-token"
+ name: "get github app token"
+ uses: "actions/create-github-app-token@v1"
+ with:
+ app-id: "${{ secrets.APP_ID }}"
+ owner: "${{ github.repository_owner }}"
+ private-key: "${{ secrets.APP_PRIVATE_KEY }}"
+
+ - name: "Set up QEMU"
+ uses: "docker/setup-qemu-action@v3"
+ - name: "set up docker buildx"
+ uses: "docker/setup-buildx-action@v3"
+ - name: "Login to DockerHub (from vault)"
+ uses: "grafana/shared-workflows/actions/dockerhub-login@main"
+
+ - name: "Build and push"
+ timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
+ uses: "docker/build-push-action@v6"
+ with:
+ build-args: "IMAGE_TAG=${{ steps.prepare.outputs.release_version }}"
+ context: loki
+ file: "loki/cmd/logql-analyzer/Dockerfile"
+ platforms: "linux/amd64"
+ push: true
+ tags: "grafana/logql-analyzer:${{ steps.prepare.outputs.release_version }}"
+
+ - name: Log in to Google Artifact Registry
+ uses: grafana/shared-workflows/actions/login-to-gar@main
+ with:
+ registry: "us-docker.pkg.dev"
+ environment: "prod"
+
+ - name: Update to latest image
+ env:
+ GITHUB_TOKEN: ${{ steps.get-github-app-token.outputs.token }}
+ RELEASE_NAME: ${{ steps.prepare.outputs.release_name }}
+ RELEASE_VERSION: ${{ steps.prepare.outputs.release_version }}
+ run: |
+ set -e -o pipefail
+
+ cat << EOF > config.json
+ {
+ "repo_name": "deployment_tools",
+ "destination_branch": "master",
+ "git_author_email": "119986603+updater-for-ci[bot]@users.noreply.github.com",
+ "git_author_name": "version_bumper[bot]",
+ "git_committer_email": "119986603+updater-for-ci[bot]@users.noreply.github.com",
+ "git_committer_name": "version_bumper[bot]",
+ "pull_request_branch_prefix": "logql-analyzer/updater",
+ "pull_request_enabled": true,
+ "pull_request_existing_strategy": "replace",
+ "pull_request_title_prefix": "[logql-analyzer updater] ",
+ "pull_request_message": "Add logql-analyzer version to ${RELEASE_VERSION} to supported versions",
+ "update_jsonnet_attribute_configs": [
+ {
+ "file_path": "ksonnet/environments/logql-analyzer/supported-versions.libsonnet",
+ "jsonnet_key": "${RELEASE_NAME}",
+ "jsonnet_value": "grafana/logql-analyzer:${RELEASE_VERSION}-amd64",
+ "upsert": true
+ }
+ ]
+ }
+ EOF
+
+ docker run --rm \
+ -e GITHUB_TOKEN="$GITHUB_TOKEN" \
+ -e CONFIG_JSON="$(cat config.json)" us-docker.pkg.dev/grafanalabs-global/docker-deployment-tools-prod/updater |& tee updater-output.log
diff --git a/.github/workflows/scripts/common.sh b/.github/workflows/scripts/common.sh
old mode 100644
new mode 100755
index b5cba118af718..e6d37b25c6173
--- a/.github/workflows/scripts/common.sh
+++ b/.github/workflows/scripts/common.sh
@@ -22,24 +22,20 @@ get_yaml_node() {
# Increments the part of the semver string
# $1: version itself
# $2: number of part: 0 – major, 1 – minor, 2 – patch
+# shellcheck disable=SC2207,SC2046,SC2248,SC2250
increment_semver() {
local delimiter=.
- local array=("$(echo "$1" | tr "${delimiter}" '\n')")
- array[$2]=$((array[$2] + 1))
- echo "$(
- local IFS=${delimiter}
- echo "${array[*]}"
- )"
+ local array=($(echo "$1" | tr $delimiter '\n'))
+ array[$2]=$((array[$2]+1))
+ echo $(local IFS=$delimiter ; echo "${array[*]}")
}
# Sets the patch segment of a semver to 0
# $1: version itself
+# shellcheck disable=SC2207,SC2046,SC2248,SC2250
set_semver_patch_to_zero() {
local delimiter=.
- local array=("$(echo "$1" | tr "${delimiter}" '\n')")
+ local array=($(echo "$1" | tr $delimiter '\n'))
array[2]="0"
- echo "$(
- local IFS=${delimiter}
- echo "${array[*]}"
- )"
+ echo $(local IFS=$delimiter ; echo "${array[*]}")
}
diff --git a/.github/workflows/scripts/helm-tagged-release.sh b/.github/workflows/scripts/helm-tagged-release.sh
index fd6c06f520d04..4a0d90eec59af 100755
--- a/.github/workflows/scripts/helm-tagged-release.sh
+++ b/.github/workflows/scripts/helm-tagged-release.sh
@@ -51,4 +51,5 @@ sed --in-place \
make TTY='' helm-docs
-echo "::set-output name=new_chart_version::${new_chart_version}"
+# shellcheck disable=SC2154,SC2250
+echo "new_chart_version=${new_chart_version}" >> "$GITHUB_OUTPUT"
diff --git a/.github/workflows/scripts/helm-weekly-release.sh b/.github/workflows/scripts/helm-weekly-release.sh
index 64d5f29f4557f..6b3d6043b604d 100755
--- a/.github/workflows/scripts/helm-weekly-release.sh
+++ b/.github/workflows/scripts/helm-weekly-release.sh
@@ -11,7 +11,11 @@ source "${script_dir}/common.sh"
find_latest_image_tag() {
local docker_hub_repo=$1
local regExp="^(k|weekly-k)\d+-[a-z0-9]+"
- crane ls "${docker_hub_repo}" | grep -P "${regExp}" | sed -E "s/([weekly-]*k[[:digit:]]*)-([^-]*).*/\1-\2/g" | uniq | sort -Vur | head -1
+ local crane_results
+ crane_results="$(crane ls "${docker_hub_repo}" | grep -P "${regExp}" | sed -E "s/([weekly-]*k[[:digit:]]*)-([^-]*).*/\1-\2/g" | sort -Vur)"
+ set +o pipefail
+ echo "${crane_results}" | head -1
+ set -o pipefail
}
# takes k197-abcdef and returns r197, k197-abcdef-arm64 and returns k197, weekly-k197-abcdef and returns k197
@@ -22,6 +26,7 @@ extract_k_version() {
calculate_next_chart_version() {
local current_chart_version=$1
local latest_image_tag=$2
+ local k_release=$3
local current_chart_semver
current_chart_semver=$(echo "${current_chart_version}" | grep -P -o '^(\d+.){2}\d+')
@@ -35,7 +40,12 @@ calculate_next_chart_version() {
# Also reset the patch release number to 0.
new_chart_semver=$(set_semver_patch_to_zero "${new_chart_semver}")
fi
- echo "${new_chart_semver}-weekly.${new_chart_weekly}"
+
+ if ${k_release}; then
+ echo "${new_chart_semver}-weekly.${new_chart_weekly}"
+ else
+ echo "${new_chart_semver}"
+ fi
}
validate_version_update() {
@@ -60,25 +70,45 @@ validate_version_update() {
fi
}
+k_release=false
+if [[ "$1" == "-k" ]]; then
+ k_release=true
+ shift
+fi
+
values_file=production/helm/loki/values.yaml
chart_file=production/helm/loki/Chart.yaml
latest_loki_tag=$(find_latest_image_tag grafana/loki)
latest_gel_tag=$(find_latest_image_tag grafana/enterprise-logs)
current_chart_version=$(get_yaml_node "${chart_file}" .version)
-new_chart_version=$(calculate_next_chart_version "${current_chart_version}" "${latest_loki_tag}")
+new_chart_version=$(calculate_next_chart_version "${current_chart_version}" "${latest_loki_tag}" "${k_release}")
validate_version_update "${new_chart_version}" "${current_chart_version}" "${latest_gel_tag}" "${latest_loki_tag}"
-update_yaml_node "${values_file}" .loki.image.tag "${latest_loki_tag}"
-update_yaml_node "${values_file}" .enterprise.image.tag "${latest_gel_tag}"
-update_yaml_node "${chart_file}" .appVersion "$(extract_k_version "${latest_loki_tag}")"
+if ${k_release}; then
+ update_yaml_node "${values_file}" .loki.image.tag "${latest_loki_tag}"
+ update_yaml_node "${values_file}" .enterprise.image.tag "${latest_gel_tag}"
+ update_yaml_node "${chart_file}" .appVersion "$(extract_k_version "${latest_loki_tag}")"
+fi
+
update_yaml_node "${chart_file}" .version "${new_chart_version}"
-sed --in-place \
- --regexp-extended \
- "s/(.*\.*)/\1\n\n## ${new_chart_version}\n\n- \[CHANGE\] Changed version of Grafana Loki to ${latest_loki_tag}\n- \[CHANGE\] Changed version of Grafana Enterprise Logs to ${latest_gel_tag}/g" production/helm/loki/CHANGELOG.md
+if ${k_release}; then
+ sed --in-place \
+ --regexp-extended \
+ "s/(.*\.*)/\1\n\n## ${new_chart_version}\n\n- \[CHANGE\] Changed version of Grafana Loki to ${latest_loki_tag}\n- \[CHANGE\] Changed version of Grafana Enterprise Logs to ${latest_gel_tag}/g" production/helm/loki/CHANGELOG.md
+else
+ sed --in-place \
+ --regexp-extended \
+ "s/(.*\.*)/\1\n\n## ${new_chart_version}/g" production/helm/loki/CHANGELOG.md
+fi
make TTY='' helm-docs
-echo "::set-output name=new_chart_version::${new_chart_version}"
+# shellcheck disable=SC2154,SC2250
+echo "new_chart_version=${new_chart_version}" >> "$GITHUB_OUTPUT"
+if ${k_release}; then
+ # shellcheck disable=SC2154,SC2250
+ echo "weekly=$(extract_k_version "${latest_loki_tag}")" >> "$GITHUB_OUTPUT"
+fi
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dfc370a4ea44b..9b4503d14a7e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog
+## Unreleased
+
+### Bug Fixes
+
+* **promtail:** fix parser for azureeventhubs message without time field ([#14218](https://github.com/grafana/loki/pull/14218))
+
## [3.1.1](https://github.com/grafana/loki/compare/v3.1.0...v3.1.1) (2024-08-08)
@@ -12,7 +18,6 @@
* **deps:** bumped dependencies versions to resolve CVEs ([#13789](https://github.com/grafana/loki/issues/13789)) ([34206cd](https://github.com/grafana/loki/commit/34206cd2d6290566034710ae6c2d08af8804bc91))
-
## [3.1.0](https://github.com/grafana/loki/compare/v3.0.0...v3.1.0) (2024-07-02)
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser.go b/clients/pkg/promtail/targets/azureeventhubs/parser.go
index 0001dc525019e..659f1a2e7a643 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser.go
@@ -13,7 +13,6 @@ import (
"github.com/prometheus/prometheus/model/relabel"
"github.com/grafana/loki/v3/clients/pkg/promtail/api"
-
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -33,7 +32,9 @@ func (l azureMonitorResourceLogs) validate() error {
// azureMonitorResourceLog used to unmarshal common schema for Azure resource logs
// https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema
type azureMonitorResourceLog struct {
- Time string `json:"time"`
+ Time string `json:"time"`
+ // Some logs have `time` field, some have `timeStamp` field : https://github.com/grafana/loki/issues/14176
+ TimeStamp string `json:"timeStamp"`
Category string `json:"category"`
ResourceID string `json:"resourceId"`
OperationName string `json:"operationName"`
@@ -41,7 +42,7 @@ type azureMonitorResourceLog struct {
// validate check if fields marked as required by schema for Azure resource log are not empty
func (l azureMonitorResourceLog) validate() error {
- valid := len(l.Time) != 0 &&
+ valid := l.isTimeOrTimeStampFieldSet() &&
len(l.Category) != 0 &&
len(l.ResourceID) != 0 &&
len(l.OperationName) != 0
@@ -53,6 +54,34 @@ func (l azureMonitorResourceLog) validate() error {
return nil
}
+func (l azureMonitorResourceLog) isTimeOrTimeStampFieldSet() bool {
+ return len(l.Time) != 0 || len(l.TimeStamp) != 0
+}
+
+// getTime returns time from `time` or `timeStamp` field. If both fields are set, `time` is used. If both fields are empty, error is returned.
+func (l azureMonitorResourceLog) getTime() (time.Time, error) {
+ if len(l.Time) == 0 && len(l.TimeStamp) == 0 {
+ var t time.Time
+ return t, errors.New("time and timeStamp fields are empty")
+ }
+
+ if len(l.Time) != 0 {
+ t, err := time.Parse(time.RFC3339, l.Time)
+ if err != nil {
+ return t, err
+ }
+
+ return t.UTC(), nil
+ }
+
+ t, err := time.Parse(time.RFC3339, l.TimeStamp)
+ if err != nil {
+ return t, err
+ }
+
+ return t.UTC(), nil
+}
+
type messageParser struct {
disallowCustomMessages bool
}
@@ -153,11 +182,11 @@ func (e *messageParser) parseRecord(record []byte, labelSet model.LabelSet, rela
}
func (e *messageParser) getTime(messageTime time.Time, useIncomingTimestamp bool, logRecord *azureMonitorResourceLog) time.Time {
- if !useIncomingTimestamp || logRecord.Time == "" {
+ if !useIncomingTimestamp || !logRecord.isTimeOrTimeStampFieldSet() {
return messageTime
}
- recordTime, err := time.Parse(time.RFC3339, logRecord.Time)
+ recordTime, err := logRecord.getTime()
if err != nil {
return messageTime
}
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
index 156dc48d961c1..662dce4358790 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
@@ -253,3 +253,37 @@ func readFile(t *testing.T, filename string) []byte {
assert.NoError(t, err)
return data
}
+
+func Test_parseMessage_message_without_time_with_time_stamp(t *testing.T) {
+ messageParser := &messageParser{
+ disallowCustomMessages: true,
+ }
+
+ message := &sarama.ConsumerMessage{
+ Value: readFile(t, "testdata/message_without_time_with_time_stamp.json"),
+ Timestamp: time.Date(2023, time.March, 17, 8, 44, 02, 0, time.UTC),
+ }
+
+ entries, err := messageParser.Parse(message, nil, nil, true)
+ assert.NoError(t, err)
+ assert.Len(t, entries, 1)
+
+ expectedLine1 := "{\n \"timeStamp\": \"2024-09-18T00:45:09+00:00\",\n \"resourceId\": \"/RESOURCE_ID\",\n \"operationName\": \"ApplicationGatewayAccess\",\n \"category\": \"ApplicationGatewayAccessLog\"\n }"
+ assert.Equal(t, expectedLine1, entries[0].Line)
+
+ assert.Equal(t, time.Date(2024, time.September, 18, 00, 45, 9, 0, time.UTC), entries[0].Timestamp)
+}
+
+func Test_parseMessage_message_without_time_and_time_stamp(t *testing.T) {
+ messageParser := &messageParser{
+ disallowCustomMessages: true,
+ }
+
+ message := &sarama.ConsumerMessage{
+ Value: readFile(t, "testdata/message_without_time_and_time_stamp.json"),
+ Timestamp: time.Date(2023, time.March, 17, 8, 44, 02, 0, time.UTC),
+ }
+
+ _, err := messageParser.Parse(message, nil, nil, true)
+ assert.EqualError(t, err, "required field or fields is empty")
+}
diff --git a/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json
new file mode 100644
index 0000000000000..f9fc41ad02aea
--- /dev/null
+++ b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json
@@ -0,0 +1,9 @@
+{
+ "records": [
+ {
+ "resourceId": "/RESOURCE_ID",
+ "operationName": "ApplicationGatewayAccess",
+ "category": "ApplicationGatewayAccessLog"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json
new file mode 100644
index 0000000000000..8579fc489761a
--- /dev/null
+++ b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json
@@ -0,0 +1,10 @@
+{
+ "records": [
+ {
+ "timeStamp": "2024-09-18T00:45:09+00:00",
+ "resourceId": "/RESOURCE_ID",
+ "operationName": "ApplicationGatewayAccess",
+ "category": "ApplicationGatewayAccessLog"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go
index 976395cd4f420..e3e4034ce479d 100644
--- a/cmd/logcli/main.go
+++ b/cmd/logcli/main.go
@@ -483,6 +483,7 @@ func newQueryClient(app *kingpin.Application) client.Client {
app.Flag("max-backoff", "Maximum backoff time between retries. Can also be set using LOKI_CLIENT_MAX_BACKOFF env var.").Default("0").Envar("LOKI_CLIENT_MAX_BACKOFF").IntVar(&client.BackoffConfig.MaxBackoff)
app.Flag("auth-header", "The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.").Default("Authorization").Envar("LOKI_AUTH_HEADER").StringVar(&client.AuthHeader)
app.Flag("proxy-url", "The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.").Default("").Envar("LOKI_HTTP_PROXY_URL").StringVar(&client.ProxyURL)
+ app.Flag("compress", "Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.").Default("false").Envar("LOKI_HTTP_COMPRESSION").BoolVar(&client.Compression)
return client
}
diff --git a/docs/Makefile b/docs/Makefile
index 4bed302d71794..ec038bd7a056e 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -17,7 +17,12 @@ ifeq ($(BUILD_IN_CONTAINER),true)
-c /helm-docs/production/helm/ \
-t reference.md.gotmpl \
-o reference.md
+ $(PODMAN) run --rm --volume "$(realpath ..):/helm-docs" -u "$$(id -u)" "docker.io/jnorwood/helm-docs:v1.11.0" \
+ -c /helm-docs/production/helm/ \
+ -t README.md.gotmpl \
+ -o README.md
else
helm-docs -c ../production/helm/ -t reference.md.gotmpl -o reference.md
+ helm-docs -c ../production/helm/ -t README.md.gotmpl -o README.md
endif
mv "$(basename $<)" "$@"
diff --git a/docs/sources/community/maintaining/release/backport-commits.md b/docs/sources/community/maintaining/release/backport-commits.md
index accf357f60a44..18171f23cc045 100644
--- a/docs/sources/community/maintaining/release/backport-commits.md
+++ b/docs/sources/community/maintaining/release/backport-commits.md
@@ -2,6 +2,7 @@
title: Backport commits
description: Backport commits
---
+
# Backport commits
Any PRs or commits not on the release branch that you want to include in the release must be backported to the release branch.
@@ -19,17 +20,16 @@ Any PRs or commits not on the release branch that you want to include in the rel
1. Add two labels to the PR. First, one of the `product-approved`, `type/doc` or `type/bug` appropriately. This is to make sure the PRs that are backported are done with right intention. Second `backport release-VERSION_PREFIX` label.
Now CI should automatically create backport PR to the correct release branch. Example [PR](https://github.com/grafana/loki/pull/10333)
- {{% admonition type="note" %}}
- CI automation can fail sometimes if there are some merge conflicts in cherry picking the commits. In those cases, the original PR where you added the label should have additional comment explaining how to backport it manually.
- {{% /admonition %}}
+ {{< admonition type="note" >}}
+ CI automation can fail sometimes if there are some merge conflicts in cherry picking the commits. In those cases, the original PR where you added the label should have additional comment explaining how to backport it manually.
+ {{< /admonition >}}
- {{% admonition type="note" %}}
- The CI job that helps with backporting PR is `.github/workflows/backport.yml`. Useful for debugging purposes.
- {{% /admonition %}}
+ {{< admonition type="note" >}}
+ The CI job that helps with backporting PR is `.github/workflows/backport.yml`. Useful for debugging purposes.
+ {{< /admonition >}}
1. Repeat the above steps for any PRs that need to be backported.
-
## Backporting Release PRs
If backporting a release PR, make sure you remove any `autorelease: pending` or `autorelease: tagged` labels before merging the backport PR. By default our backport action brings over all labels, but these labels are reserved for the release workflow and will cause future pipelines to fail if left of backport PRs.
diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md
index 76ac072a3a96b..98cfe1be7505d 100644
--- a/docs/sources/get-started/_index.md
+++ b/docs/sources/get-started/_index.md
@@ -9,7 +9,7 @@ description: Provides an overview of the steps for implementing Grafana Loki to
{{< youtube id="1uk8LtQqsZQ" >}}
-Loki is a horizontally scalable, highly available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream.
+Loki is a horizontally scalable, highly available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost-effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream.
Because all Loki implementations are unique, the installation process is
different for every customer. But there are some steps in the process that
@@ -25,7 +25,7 @@ To collect logs and view your log data generally involves the following steps:
- There are [examples](https://grafana.com/docs/loki//configure/examples/) for specific Object Storage providers that you can modify.
1. Deploy [Grafana Alloy](https://grafana.com/docs/alloy/latest/) to collect logs from your applications.
1. On Kubernetes, deploy Grafana Alloy using the Helm chart. Configure Grafana Alloy to scrape logs from your Kubernetes cluster, and add your Loki endpoint details. See the following section for an example Grafana Alloy configuration file.
- 1. Add [labels](https://grafana.com/docs/loki//get-started/labels/) to your logs following our [best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/). Most Loki users start by adding labels which describe where the logs are coming from (region, cluster, environment, etc.).
+ 1. Add [labels](https://grafana.com/docs/loki//get-started/labels/) to your logs following our [best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/). Most Loki users start by adding labels that describe where the logs are coming from (region, cluster, environment, etc.).
1. Deploy [Grafana](https://grafana.com/docs/grafana/latest/setup-grafana/) or [Grafana Cloud](https://grafana.com/docs/grafana-cloud/quickstart/) and configure a [Loki data source](https://grafana.com/docs/grafana/latest/datasources/loki/configure-loki-data-source/).
1. Select the [Explore feature](https://grafana.com/docs/grafana/latest/explore/) in the Grafana main menu. To [view logs in Explore](https://grafana.com/docs/grafana/latest/explore/logs-integration/):
1. Pick a time range.
@@ -36,7 +36,7 @@ To collect logs and view your log data generally involves the following steps:
## Example Grafana Alloy and Agent configuration files to ship Kubernetes Pod logs to Loki
-To deploy Grafana Alloy or Agent to collect Pod logs from your Kubernetes cluster and ship them to Loki, you an use a Helm chart, and a `values.yaml` file.
+To deploy Grafana Alloy or Agent to collect Pod logs from your Kubernetes cluster and ship them to Loki, you can use a Helm chart, and a `values.yaml` file.
This sample `values.yaml` file is configured to:
diff --git a/docs/sources/get-started/hash-rings.md b/docs/sources/get-started/hash-rings.md
index 8bb024f4085fb..a4a242015ee26 100644
--- a/docs/sources/get-started/hash-rings.md
+++ b/docs/sources/get-started/hash-rings.md
@@ -31,7 +31,6 @@ These components need to be connected into a hash ring:
- query schedulers
- compactors
- rulers
-- bloom compactors (Experimental)
These components can optionally be connected into a hash ring:
- index gateway
@@ -104,13 +103,3 @@ The ruler ring is used to determine which rulers evaluate which rule groups.
## About the index gateway ring
The index gateway ring is used to determine which gateway is responsible for which tenant's indexes when queried by rulers or queriers.
-
-## About the Bloom Compactor ring
-{{% admonition type="warning" %}}
-This feature is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
-{{% /admonition %}}
-
-The Bloom Compactor ring is used to determine which subset of compactors own a given tenant,
-and which series fingerprint ranges each compactor owns.
-The ring is also used to determine which compactor owns retention.
-Retention will be applied by the compactor owning the smallest token in the ring.
diff --git a/docs/sources/get-started/labels/structured-metadata.md b/docs/sources/get-started/labels/structured-metadata.md
index 91fe5d80ab676..ac91f58e0c87b 100644
--- a/docs/sources/get-started/labels/structured-metadata.md
+++ b/docs/sources/get-started/labels/structured-metadata.md
@@ -37,7 +37,8 @@ See the [Promtail: Structured metadata stage](https://grafana.com/docs/loki//send-data/logstash/).
{{% admonition type="warning" %}}
-There are defaults for how much structured metadata can be attached per log line.
+Structured metadata size is taken into account while asserting ingestion rate limiting.
+Along with that, there are separate limits on how much structured metadata can be attached per log line.
```
# Maximum size accepted for structured metadata per log line.
# CLI flag: -limits.max-structured-metadata-size
diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md
index cfe0c5e6ed874..69910fa0c3154 100644
--- a/docs/sources/get-started/quick-start.md
+++ b/docs/sources/get-started/quick-start.md
@@ -37,7 +37,7 @@ The Docker Compose configuration runs the following components, each in its own
- **Gateway** (nginx) which receives requests and redirects them to the appropriate container based on the request's URL.
- **Loki read component**: which runs a Query Frontend and a Querier.
- **Loki write component**: which runs a Distributor and an Ingester.
-- **Loki backend component**: which runs an Index Gateway, Compactor, Ruler, Bloom Compactor (experimental), and Bloom Gateway (experimental).
+- **Loki backend component**: which runs an Index Gateway, Compactor, Ruler, Bloom Planner (experimental), Bloom Builder (experimental), and Bloom Gateway (experimental).
- **Minio**: which Loki uses to store its index and chunks.
- **Grafana**: which provides visualization of the log lines captured within Loki.
@@ -141,9 +141,9 @@ This quickstart assumes you are running Linux.
- You can access the Grafana Alloy UI at [http://localhost:12345](http://localhost:12345).
6. (Optional) You can check all the containers are running by running the following command:
-
+
```bash
- docker ps -a
+ docker ps -a
```
@@ -321,7 +321,7 @@ Within the entrypoint section, the Loki data source is configured with the follo
- `URL: http://gateway:3100` (URL of the Loki data source. Loki uses an nginx gateway to direct traffic to the appropriate component)
- `jsonData.httpHeaderName1: "X-Scope-OrgID"` (header name for the organization ID)
- `secureJsonData.httpHeaderValue1: "tenant1"` (header value for the organization ID)
-
+
It is important to note when Loki is configured in any other mode other than monolithic deployment, you are required to pass a tenant ID in the header. Without this, queries will return an authorization error.
@@ -344,4 +344,4 @@ It's a self-contained environment for learning about Mimir, Loki, Tempo, and Gra
The project includes detailed explanations of each component and annotated configurations for a single-instance deployment.
You can also push the data from the environment to [Grafana Cloud](https://grafana.com/cloud/).
-
\ No newline at end of file
+
diff --git a/docs/sources/operations/bloom-filters.md b/docs/sources/operations/bloom-filters.md
new file mode 100644
index 0000000000000..63b0c4ecfaa6e
--- /dev/null
+++ b/docs/sources/operations/bloom-filters.md
@@ -0,0 +1,199 @@
+---
+title: Bloom filters (Experimental)
+menuTitle: Bloom filters
+description: Describes how to enable and configure query acceleration with bloom filters.
+weight:
+keywords:
+ - blooms
+ - query acceleration
+aliases:
+ - ./query-acceleration-blooms
+---
+
+# Bloom filters (Experimental)
+
+{{% admonition type="warning" %}}
+This feature is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
+{{% /admonition %}}
+
+Loki leverages [bloom filters](https://en.wikipedia.org/wiki/Bloom_filter) to speed up queries by reducing the amount of data Loki needs to load from the store and iterate through.
+Loki is often used to run "needle in a haystack" queries; these are queries where a large number of log lines are searched, but only a few log lines match the query.
+Some common use cases are searching all logs tied to a specific trace ID or customer ID.
+
+An example of such queries would be looking for a trace ID on a whole cluster for the past 24 hours:
+
+```logql
+{cluster="prod"} | traceID="3c0e3dcd33e7"
+```
+
+Without accelerated filtering, Loki downloads all the chunks for all the streams matching `{cluster="prod"}` for the last 24 hours and iterates through each log line in the chunks, checking if the [structured metadata][] key `traceID` with value `3c0e3dcd33e7` is present.
+
+With accelerated filtering, Loki is able to skip most of the chunks and only process the ones where we have a statistical confidence that the structured metadata pair might be present.
+
+To learn how to write queries to use bloom filters, refer to [Query acceleration][].
+
+## Enable bloom filters
+
+{{< admonition type="warning" >}}
+Building and querying bloom filters are by design not supported in single binary deployment.
+It can be used with Simple Scalable deployment (SSD), but it is recommended to run bloom components only in fully distributed microservice mode.
+The reason is that bloom filters also come with a relatively high cost for both building and querying the bloom filters that only pays off at large scale deployments.
+{{< /admonition >}}
+
+To start building and using blooms you need to:
+
+- Deploy the [Bloom Planner and Builder](#bloom-planner-and-builder) components (as [microservices][microservices] or via the [SSD][ssd] `backend` target) and enable the components in the [Bloom Build config][bloom-build-cfg].
+- Deploy the [Bloom Gateway](#bloom-gateway) component (as a [microservice][microservices] or via the [SSD][ssd] `backend` target) and enable the component in the [Bloom Gateway config][bloom-gateway-cfg].
+- Enable blooms building and filtering for each tenant individually, or for all of them by default.
+
+```yaml
+# Configuration block for the bloom creation.
+bloom_build:
+ enabled: true
+ planner:
+ planning_interval: 6h
+ builder:
+    planner_address: bloom-planner.<namespace>.svc.cluster.local.:9095
+
+# Configuration block for bloom filtering.
+bloom_gateway:
+ enabled: true
+ client:
+    addresses: dnssrvnoa+_bloom-gateway-grpc._tcp.bloom-gateway-headless.<namespace>.svc.cluster.local
+
+# Enable blooms creation and filtering for all tenants by default
+# or do it on a per-tenant basis.
+limits_config:
+ bloom_creation_enabled: true
+ bloom_split_series_keyspace_by: 1024
+ bloom_gateway_enable_filtering: true
+```
+
+For more configuration options refer to the [Bloom Gateway][bloom-gateway-cfg], [Bloom Build][bloom-build-cfg] and [per tenant-limits][tenant-limits] configuration docs.
+We strongly recommend reading the whole documentation for this experimental feature before using it.
+
+## Bloom Planner and Builder
+
+Building bloom filters from the chunks in the object storage is done by two components: the Bloom Planner and the Bloom
+Builder, where the planner creates tasks for bloom building, and sends the tasks to the builders to process and upload the resulting blocks.
+Bloom filters are grouped in bloom blocks spanning multiple streams (also known as series) and chunks from a given day.
+To learn more about how blocks and metadata files are organized, refer to the [Building blooms](#building-blooms) section below.
+
+The Bloom Planner runs as a single instance and calculates the gaps in fingerprint ranges for a certain time period for a tenant for which bloom filters need to be built.
+It dispatches these tasks to the available builders. The planner also applies the [blooms retention](#retention).
+
+{{< admonition type="warning" >}}
+Do not run more than one instance of the Bloom Planner.
+{{< /admonition >}}
+
+The Bloom Builder is a stateless horizontally scalable component and can be scaled independently of the planner to fulfill the processing demand of the created tasks.
+
+You can find all the configuration options for these components in the [Configure section for the Bloom Builder][bloom-build-cfg].
+Refer to the [Enable bloom filters](#enable-bloom-filters) section above for a configuration snippet enabling this feature.
+
+### Retention
+
+The Bloom Planner applies bloom block retention on object storage. Retention is disabled by default.
+When enabled, retention is applied to all tenants. The retention for each tenant is the longest of its [configured][tenant-limits] general retention (`retention_period`) and the streams retention (`retention_stream`).
+
+In the following example, tenant A has a bloom retention of 30 days, and tenant B a bloom retention of 40 days for the `{namespace="prod"}` stream.
+
+```yaml
+overrides:
+ "A":
+ retention_period: 30d
+ "B":
+ retention_period: 30d
+ retention_stream:
+ - selector: '{namespace="prod"}'
+ priority: 1
+ period: 40d
+```
+
+### Sizing and configuration
+
+The single planner instance runs the planning phase for bloom blocks for each tenant in the given interval and puts the created tasks to an internal task queue.
+Builders process tasks sequentially by pulling them from the queue. The amount of builder replicas required to complete all pending tasks before the next planning iteration depends on the value of `-bloom-build.planner.bloom_split_series_keyspace_by`, the number of tenants, and the log volume of the streams.
+
+The maximum block size is configured per tenant via `-bloom-build.max-block-size`.
+The actual block size might exceed this limit given that we append stream blooms to the block until the block is larger than the configured maximum size.
+Blocks are created in memory and as soon as they are written to the object store they are freed. Chunks and TSDB files are downloaded from the object store to the file system.
+We estimate that builders are able to process 4MB worth of data per second per core.
+
+## Bloom Gateway
+
+Bloom Gateways handle chunks filtering requests from the [index gateway](https://grafana.com/docs/loki//get-started/components/#index-gateway).
+The service takes a list of chunks and a filtering expression and matches them against the blooms, filtering out those chunks not matching the given filter expression.
+
+This component is horizontally scalable and every instance only owns a subset of the stream fingerprint range for which it performs the filtering.
+The sharding of the data is performed on the client side using DNS discovery of the server instances and the [jumphash](https://arxiv.org/abs/1406.2294) algorithm for consistent hashing and even distribution of the stream fingerprints across Bloom Gateway instances.
+
+You can find all the configuration options for this component in the Configure section for the [Bloom Gateways][bloom-gateway-cfg].
+Refer to the [Enable bloom filters](#enable-bloom-filters) section above for a configuration snippet enabling this feature.
+
+### Sizing and configuration
+
+Bloom Gateways use their local file system as a Least Recently Used (LRU) cache for blooms that are downloaded from object storage.
+The size of the blooms depends on the ingest volume and the number of unique structured metadata key-value pairs, as well as on build settings of the blooms, namely the false-positive rate.
+With default settings, bloom filters make up <1% of the raw structured metadata size.
+
+Since reading blooms depends heavily on disk IOPS, Bloom Gateways should make use of multiple, locally attached SSD disks (NVMe) to increase I/O throughput.
+Multiple directories on different disk mounts can be specified using the `-bloom.shipper.working-directory` [setting][storage-config-cfg] when using a comma separated list of mount points, for example:
+
+```
+-bloom.shipper.working-directory="/mnt/data0,/mnt/data1,/mnt/data2,/mnt/data3"
+```
+
+Bloom Gateways need to deal with relatively large files: the bloom filter blocks.
+Even though the binary format of the bloom blocks allows for reading them into memory in smaller pages, the memory consumption depends on the number of pages that are concurrently loaded into memory for processing.
+The product of three settings controls the maximum amount of bloom data in memory at any given time: `-bloom-gateway.worker-concurrency`, `-bloom-gateway.block-query-concurrency`, and `-bloom.max-query-page-size`.
+
+Example, assuming 4 CPU cores:
+
+```
+-bloom-gateway.worker-concurrency=4 // 1x NUM_CORES
+-bloom-gateway.block-query-concurrency=8 // 2x NUM_CORES
+-bloom.max-query-page-size=64MiB
+
+4 x 8 x 64MiB = 2048MiB
+```
+
+Here, the memory requirement for block processing is 2GiB.
+To get the minimum requirements for the Bloom Gateways, you need to double the value.
+
+## Building blooms
+
+Bloom filters are built per stream and aggregated together into block files.
+Streams are assigned to blocks by their fingerprint, following the same ordering scheme as Loki’s TSDB and sharding calculation.
+This gives a data locality benefit when querying as streams in the same shard are likely to be in the same block.
+
+In addition to blocks, builders maintain a list of metadata files containing references to bloom blocks and the
+TSDB index files they were built from. Gateways and the planner use these metadata files to discover existing blocks.
+
+Every `-bloom-build.planner.interval`, the planner loads the latest TSDB files for all tenants for which bloom building is enabled, and compares the TSDB files with the latest bloom metadata files.
+If there are new TSDB files or any of them have changed, the planner will create a task for the streams and chunks referenced by the TSDB file.
+
+The builder pulls a task from the planner's queue and processes the streams and chunks it contains.
+For a given stream, the builder will iterate through all the log lines inside its new chunks and build a bloom for the stream.
+In case of changes for a previously processed TSDB file, builders will try to reuse blooms from existing blocks instead of building new ones from scratch.
+The builder converts structured metadata from each log line of each chunk of a stream and appends the hash of each key, value, and key-value pair to the bloom, followed by the hashes combined with the chunk identifier.
+The first set of hashes allows gateways to skip whole streams, while the latter is for skipping individual chunks.
+
+For example, given structured metadata `foo=bar` in the chunk `c6dj8g`, we append to the stream bloom the following hashes: `hash("foo")`, `hash("bar")`, `hash("foo=bar")`, `hash("c6dj8g" + "foo")` ... `hash("c6dj8g" + "foo=bar")`.
+
+## Query sharding
+
+Query acceleration does not just happen while processing chunks, but also happens from the query planning phase where the query frontend applies [query sharding](https://lokidex.com/posts/tsdb/#sharding).
+Loki 3.0 introduces a new [per-tenant configuration][tenant-limits] flag `tsdb_sharding_strategy` which defaults to computing shards as in previous versions of Loki by using the index stats to come up with the closest power of two that would optimistically divide the data to process in shards of roughly the same size.
+Unfortunately, the amount of data each stream has is often unbalanced with the rest, therefore, some shards end up processing more data than others.
+
+Query acceleration introduces a new sharding strategy: `bounded`, which uses blooms to reduce the chunks to be processed right away during the planning phase in the query frontend, as well as evenly distributes the amount of chunks each sharded query will need to process.
+
+[Query acceleration]: https://grafana.com/docs/loki//query/query-acceleration
+[structured metadata]: https://grafana.com/docs/loki//get-started/labels/structured-metadata
+[tenant-limits]: https://grafana.com/docs/loki//configure/#limits_config
+[bloom-gateway-cfg]: https://grafana.com/docs/loki//configure/#bloom_gateway
+[bloom-build-cfg]: https://grafana.com/docs/loki//configure/#bloom_build
+[storage-config-cfg]: https://grafana.com/docs/loki//configure/#storage_config
+[microservices]: https://grafana.com/docs/loki//get-started/deployment-modes/#microservices-mode
+[ssd]: https://grafana.com/docs/loki//get-started/deployment-modes/#simple-scalable
diff --git a/docs/sources/operations/query-acceleration-blooms.md b/docs/sources/operations/query-acceleration-blooms.md
deleted file mode 100644
index 2fec5f2922705..0000000000000
--- a/docs/sources/operations/query-acceleration-blooms.md
+++ /dev/null
@@ -1,244 +0,0 @@
----
-title: Query Acceleration with Blooms (Experimental)
-menuTitle: Query Acceleration with Blooms
-description: Describes how to enable and configure query acceleration with blooms.
-weight:
-keywords:
- - blooms
- - query acceleration
----
-
-# Query Acceleration with Blooms (Experimental)
-{{% admonition type="warning" %}}
-This feature is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
-{{% /admonition %}}
-
-Loki 3.0 leverages [bloom filters](https://en.wikipedia.org/wiki/Bloom_filter) to speed up queries by reducing the
-amount of data Loki needs to load from the store and iterate through. Loki is often used to run “needle in a haystack”
-queries; these are queries where a large number of log lines are searched, but only a few log lines match the [filtering
-expressions]({{< relref "../query/log_queries#line-filter-expression" >}}) of the query.
-Some common use cases are needing to find a specific text pattern in a message, or all logs tied to a specific customer ID.
-
-An example of such queries would be looking for a trace ID on a whole cluster for the past 24 hours:
-
-```logql
-{cluster="prod"} |= "traceID=3c0e3dcd33e7"
-```
-
-Loki would download all the chunks for all the streams matching `{cluster=”prod”}` for the last 24 hours and iterate
-through each log line in the chunks checking if the string `traceID=3c0e3dcd33e7` is present.
-
-With accelerated filtering, Loki is able to skip most of the chunks and only process the ones where we have a
-statistical confidence that the string might be present.
-The underlying blooms are built by the [Bloom Builder](#bloom-planner-and-builder) component
-and served by the new [Bloom Gateway](#bloom-gateway) component.
-
-## Enable Query Acceleration with Blooms
-{{< admonition type="warning" >}}
-Building and querying bloom filters are by design not supported in single binary deployment.
-It can be used with Single Scalable deployment (SSD), but it is recommended to
-run bloom components only in fully distributed microservice mode.
-The reason is that bloom filters also come with a relatively high cost for both building
-and querying the bloom filters that only pays off at large scale deployments.
-{{< /admonition >}}
-
-To start building and using blooms you need to:
-- Deploy the [Bloom Planner and Builder](#bloom-planner-and-builder) components (as [microservices][microservices] or via the [SSD][ssd] `backend` target) and enable the components in the [Bloom Build config][bloom-build-cfg].
-- Deploy the [Bloom Gateway](#bloom-gateway) component (as a [microservice][microservices] or via the [SSD][ssd] `backend` target) and enable the component in the [Bloom Gateway config][bloom-gateway-cfg].
-- Enable blooms building and filtering for each tenant individually, or for all of them by default.
-
-```yaml
-# Configuration block for the bloom creation.
-bloom_build:
- enabled: true
- planner:
- planning_interval: 6h
- builder:
- planner_address: bloom-planner..svc.cluster.local.:9095
-
-# Configuration block for bloom filtering.
-bloom_gateway:
- enabled: true
- client:
- addresses: dnssrvnoa+_bloom-gateway-grpc._tcp.bloom-gateway-headless..svc.cluster.local
-
-# Enable blooms creation and filtering for all tenants by default
-# or do it on a per-tenant basis.
-limits_config:
- bloom_creation_enabled: true
- bloom_split_series_keyspace_by: 1024
- bloom_gateway_enable_filtering: true
-```
-
-For more configuration options refer to the [Bloom Gateway][bloom-gateway-cfg], [Bloom Build][bloom-build-cfg] and
-[per tenant-limits][tenant-limits] configuration docs.
-We strongly recommend reading the whole documentation for this experimental feature before using it.
-
-## Bloom Planner and Builder
-Building bloom filters from the chunks in the object storage is done by two components: the Bloom Planner and the Bloom
-Builder, where the planner creates tasks for bloom building, and sends the tasks to the builders to process and
-upload the resulting blocks.
-Bloom filters are grouped in bloom blocks spanning multiple streams (also known as series) and chunks from a given day.
-To learn more about how blocks and metadata files are organized, refer to the
-[Building and querying blooms](#building-and-querying-blooms) section below.
-
-The Bloom Planner runs as a single instance and calculates the gaps in fingerprint ranges for a certain time period for
-a tenant for which bloom filters need to be built. It dispatches these tasks to the available builders.
-The planner also applies the [blooms retention](#retention).
-
-The Bloom Builder is a stateless horizontally scalable component and can be scaled independently of the planner to fulfill
-the processing demand of the created tasks.
-
-You can find all the configuration options for these components in the [Configure section for the Bloom Builder][bloom-build-cfg].
-Refer to the [Enable Query Acceleration with Blooms](#enable-query-acceleration-with-blooms) section below for
-a configuration snippet enabling this feature.
-
-### Retention
-The Bloom Planner applies bloom block retention on object storage. Retention is disabled by default.
-When enabled, retention is applied to all tenants. The retention for each tenant is the longest of its [configured][tenant-limits]
-general retention (`retention_period`) and the streams retention (`retention_stream`).
-
-For example, in the following example, tenant A has a bloom retention of 30 days, and tenant B a bloom retention of 40 days.
-
-```yaml
-overrides:
- "A":
- retention_period: 30d
- "B":
- retention_period: 30d
- retention_stream:
- - selector: '{namespace="prod"}'
- priority: 1
- period: 40d
-```
-
-### Sizing and configuration
-The single planner instance runs the planning phase for bloom blocks for each tenant in the given interval
-and puts the created tasks to an internal task queue.
-Builders process tasks sequentially by pulling them from the queue. The amount of builder replicas required to complete
-all pending tasks before the next planning iteration depends on the value of `-bloom-build.planner.bloom_split_series_keyspace_by`,
-the amount of tenants, and the log volume of the streams.
-
-The maximum block size is configured per tenant via `-bloom-build.max-block-size`.
-The actual block size might exceed this limit given that we append streams blooms to the block until the
-block is larger than the configured maximum size. Blocks are created in memory and as soon as they are written to the
-object store they are freed. Chunks and TSDB files are downloaded from the object store to the file system.
-We estimate that builders are able to process 4MB worth of data per second per core.
-
-## Bloom Gateway
-Bloom Gateways handle chunks filtering requests from the [index gateway](https://grafana.com/docs/loki//get-started/components/#index-gateway).
-The service takes a list of chunks and a filtering expression and matches them against the blooms,
-filtering out those chunks not matching the given filter expression.
-
-This component is horizontally scalable and every instance only owns a subset of the stream
-fingerprint range for which it performs the filtering.
-The sharding of the data is performed on the client side using DNS discovery of the server instances
-and the [jumphash](https://arxiv.org/abs/1406.2294) algorithm for consistent hashing
-and even distribution of the stream fingerprints across Bloom Gateway instances.
-
-You can find all the configuration options for this component in the Configure section for the [Bloom Gateways][bloom-gateway-cfg].
-Refer to the [Enable Query Acceleration with Blooms](#enable-query-acceleration-with-blooms) section below for a configuration snippet enabling this feature.
-
-### Sizing and configuration
-Bloom Gateways use their local file system as a Least Recently Used (LRU) cache for blooms that are
-downloaded from object storage. The size of the blooms depend on the ingest volume and the log content cardinality,
-as well as on build settings of the blooms, namely n-gram length, skip-factor, and false-positive-rate.
-With default settings, bloom filters make up roughly 3% of the chunk data.
-
-Example calculation for storage requirements of blooms for a single tenant.
-```
-100 MB/s ingest rate ~> 8.6 TB/day chunks ~> 260 GB/day blooms
-```
-
-Since reading blooms depends heavily on disk IOPS, Bloom Gateways should make use of multiple,
-locally attached SSD disks (NVMe) to increase i/o throughput.
-Multiple directories on different disk mounts can be specified using the `-bloom.shipper.working-directory` [setting][storage-config-cfg]
-when using a comma separated list of mount points, for example:
-```
--bloom.shipper.working-directory="/mnt/data0,/mnt/data1,/mnt/data2,/mnt/data3"
-```
-
-Bloom Gateways need to deal with relatively large files: the bloom filter blocks.
-Even though the binary format of the bloom blocks allows for reading them into memory in smaller pages,
-the memory consumption depends on the amount of pages that are concurrently loaded into memory for processing.
-The product of three settings control the maximum amount of bloom data in memory at any given
-time: `-bloom-gateway.worker-concurrency`, `-bloom-gateway.block-query-concurrency`, and `-bloom.max-query-page-size`.
-
-Example, assuming 4 CPU cores:
-```
--bloom-gateway.worker-concurrency=4 // 1x NUM_CORES
--bloom-gateway.block-query-concurrency=8 // 2x NUM_CORES
--bloom.max-query-page-size=64MiB
-
-4 x 8 x 64MiB = 2048MiB
-```
-
-Here, the memory requirement for block processing is 2GiB.
-To get the minimum requirements for the Bloom Gateways, you need to double the value.
-
-## Building and querying blooms
-Bloom filters are built per stream and aggregated together into block files.
-Streams are assigned to blocks by their fingerprint, following the same ordering scheme as Loki’s TSDB and sharding calculation.
-This gives a data locality benefit when querying as streams in the same shard are likely to be in the same block.
-
-In addition to blocks, builders maintain a list of metadata files containing references to bloom blocks and the
-TSDB index files they were built from. Gateways and the planner use these metadata files to discover existing blocks.
-
-Every `-bloom-build.planner.interval`, the planner will load the latest TSDB files for all tenants for
-which bloom building is enabled, and compares the TSDB files with the latest bloom metadata files.
-If there are new TSDB files or any of them have changed, the planner will create a task for the streams and chunks
-referenced by the TSDB file.
-
-The builder pulls a task from the planner's queue and processes the containing streams and chunks.
-For a given stream, the builder will iterate through all the log lines inside its new chunks and build a bloom for the
-stream. In case of changes for a previously processed TSDB file, builders will try to reuse blooms from existing blocks
-instead of building new ones from scratch.
-The builder computes [n-grams](https://en.wikipedia.org/wiki/N-gram#:~:text=An%20n%2Dgram%20is%20a,pairs%20extracted%20from%20a%20genome.)
-for each log line of each chunk of a stream and appends both the hash of each n-gram and the hash of each n-gram plus
-the chunk identifier to the bloom. The former allows gateways to skip whole streams while the latter is for skipping
-individual chunks.
-
-For example, given a log line `abcdef` in the chunk `c6dj8g`, we compute its n-grams: `abc`, `bcd`, `cde`, `def`.
-And append to the stream bloom the following hashes: `hash("abc")`, `hash("abc" + "c6dj8g")` ... `hash("def")`, `hash("def" + "c6dj8g")`.
-
-By adding n-grams to blooms instead of whole log lines, we can perform partial matches.
-For the example above, a filter expression `|= "bcd"` would match against the bloom.
-The filter `|= "bcde` would also match the bloom since we decompose the filter into n-grams:
-`bcd`, `cde` which both are present in the bloom.
-
-N-grams sizes are configurable. The longer the n-gram is, the fewer tokens we need to append to the blooms,
-but the longer filtering expressions need to be able to check them against blooms.
-For the example above, where the n-gram length is 3, we need filtering expressions that have at least 3 characters.
-
-### Queries for which blooms are used
-Loki will check blooms for any log filtering expression within a query that satisfies the following criteria:
-- The filtering expression contains at least as many characters as the n-gram length used to build the blooms.
- - For example, if the n-grams length is 5, the filter `|= "foo"` will not take advantage of blooms but `|= "foobar"` would.
-- If the filter is a regex, we use blooms only if we can simplify the regex to a set of simple matchers.
- - For example, `|~ "(error|warn)"` would be simplified into `|= "error" or "warn"` thus would make use of blooms,
- whereas `|~ "f.*oo"` would not be simplifiable.
-- The filtering expression is a match (`|=`) or regex match (`|~`) filter. We don’t use blooms for not equal (`!=`) or not regex (`!~`) expressions.
- - For example, `|= "level=error"` would use blooms but `!= "level=error"` would not.
-- The filtering expression is placed before a [line format expression](https://grafana.com/docs/loki//query/log_queries/#line-format-expression).
- - For example, with `|= "level=error" | logfmt | line_format "ERROR {{.err}}" |= "traceID=3ksn8d4jj3"`,
- the first filter (`|= "level=error"`) will benefit from blooms but the second one (`|= "traceID=3ksn8d4jj3"`) will not.
-
-## Query sharding
-Query acceleration does not just happen while processing chunks, but also happens from the query planning phase where
-the query frontend applies [query sharding](https://lokidex.com/posts/tsdb/#sharding).
-Loki 3.0 introduces a new [per-tenant configuration][tenant-limits] flag `tsdb_sharding_strategy` which defaults to computing
-shards as in previous versions of Loki by using the index stats to come up with the closest power of two that would
-optimistically divide the data to process in shards of roughly the same size. Unfortunately,
-the amount of data each stream has is often unbalanced with the rest,
-therefore, some shards end up processing more data than others.
-
-Query acceleration introduces a new sharding strategy: `bounded`, which uses blooms to reduce the chunks to be
-processed right away during the planning phase in the query frontend,
-as well as evenly distributes the amount of chunks each sharded query will need to process.
-
-[tenant-limits]: https://grafana.com/docs/loki//configure/#limits_config
-[bloom-gateway-cfg]: https://grafana.com/docs/loki//configure/#bloom_gateway
-[bloom-build-cfg]: https://grafana.com/docs/loki//configure/#bloom_build
-[storage-config-cfg]: https://grafana.com/docs/loki//configure/#storage_config
-[microservices]: https://grafana.com/docs/loki//get-started/deployment-modes/#microservices-mode
-[ssd]: https://grafana.com/docs/loki//get-started/deployment-modes/#simple-scalable
diff --git a/docs/sources/operations/request-validation-rate-limits.md b/docs/sources/operations/request-validation-rate-limits.md
index 6d67b3d26d2c0..cb602c17c2292 100644
--- a/docs/sources/operations/request-validation-rate-limits.md
+++ b/docs/sources/operations/request-validation-rate-limits.md
@@ -16,7 +16,7 @@ It is recommended that Loki operators set up alerts or dashboards with these met
### Terminology
-- **sample**: a log line
+- **sample**: a log line with [structured metadata]({{< relref "../get-started/labels/structured-metadata" >}})
- **stream**: samples with a unique combination of labels
- **active stream**: streams that are present in the ingesters - these have recently received log lines within the `chunk_idle_period` period (default: 30m)
diff --git a/docs/sources/operations/storage/filesystem.md b/docs/sources/operations/storage/filesystem.md
index d97a80e7823d6..68ca759cafbc9 100644
--- a/docs/sources/operations/storage/filesystem.md
+++ b/docs/sources/operations/storage/filesystem.md
@@ -1,8 +1,8 @@
---
title: Filesystem object store
-menuTitle: Filesystem object store
+menuTitle: Filesystem object store
description: Describes the features and limitations of using a filesystem object store with Loki.
-weight: 300
+weight: 300
---
# Filesystem object store
@@ -20,7 +20,7 @@ A folder is created for every tenant all the chunks for one tenant are stored in
If Loki is run in single-tenant mode, all the chunks are put in a folder named `fake` which is the synthesized tenant name used for single tenant mode.
-See [multi-tenancy]({{< relref "../multi-tenancy" >}}) for more information.
+See [multi-tenancy](https://grafana.com/docs/loki//operations/multi-tenancy/) for more information.
## Pros
@@ -30,6 +30,8 @@ Great for low volume applications, proof of concepts, and just playing around wi
## Cons
+The filesystem is not supported by Grafana Labs for production environments (for those customers who have purchased a support contract).
+
### Scaling
At some point there is a limit to how many chunks can be stored in a single directory, for example see [issue #1502](https://github.com/grafana/loki/issues/1502) which explains how a Loki user ran into a strange error with about **5.5 million chunk files** in their file store (and also a workaround for the problem).
@@ -44,4 +46,4 @@ The durability of the objects is at the mercy of the filesystem itself where oth
### High Availability
-Running Loki clustered is not possible with the filesystem store unless the filesystem is shared in some fashion (NFS for example). However using shared filesystems is likely going to be a bad experience with Loki just as it is for almost every other application.
+Running Loki clustered is not possible with the filesystem store unless the filesystem is shared in some fashion (NFS for example). However, using shared filesystems is likely going to be a bad experience with Loki just as it is for almost every other application.
diff --git a/docs/sources/query/logcli.md b/docs/sources/query/logcli.md
index 9a7d5b18a6d09..ee282ac73a6de 100644
--- a/docs/sources/query/logcli.md
+++ b/docs/sources/query/logcli.md
@@ -371,6 +371,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--limit=30 Limit on number of entries to print. Setting it to 0 will fetch all entries.
--since=1h Lookback window.
--from=FROM Start looking for logs at this absolute time (inclusive)
@@ -465,6 +466,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--limit=30 Limit on number of entries to print. Setting it to 0 will fetch all entries.
--now=NOW Time at which to execute the instant query.
--forward Scan forwards through logs.
@@ -525,6 +527,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--since=1h Lookback window.
--from=FROM Start looking for labels at this absolute time (inclusive)
--to=TO Stop looking for labels at this absolute time (exclusive)
@@ -581,6 +584,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--since=1h Lookback window.
--from=FROM Start looking for logs at this absolute time (inclusive)
--to=TO Stop looking for logs at this absolute time (exclusive)
@@ -633,6 +637,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
```
### `stats` command reference
@@ -694,6 +699,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--since=1h Lookback window.
--from=FROM Start looking for logs at this absolute time (inclusive)
--to=TO Stop looking for logs at this absolute time (exclusive)
@@ -761,6 +767,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--since=1h Lookback window.
--from=FROM Start looking for logs at this absolute time (inclusive)
--to=TO Stop looking for logs at this absolute time (exclusive)
@@ -833,6 +840,7 @@ Flags:
--auth-header="Authorization"
The authorization header used. Can also be set using LOKI_AUTH_HEADER env var.
--proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var.
+ --compress Request that Loki compress returned data in transit. Can also be set using LOKI_HTTP_COMPRESSION env var.
--since=1h Lookback window.
--from=FROM Start looking for logs at this absolute time (inclusive)
--to=TO Stop looking for logs at this absolute time (exclusive)
diff --git a/docs/sources/query/query_accceleration.md b/docs/sources/query/query_accceleration.md
new file mode 100644
index 0000000000000..ab377b828243c
--- /dev/null
+++ b/docs/sources/query/query_accceleration.md
@@ -0,0 +1,45 @@
+---
+title: Query acceleration (Experimental)
+menuTitle: Query acceleration
+description: Provides instructions on how to write LogQL queries to benefit from query acceleration.
+weight: 900
+keywords:
+ - blooms
+ - query acceleration
+---
+
+# Query acceleration (Experimental)
+
+{{% admonition type="warning" %}}
+Query acceleration using blooms is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
+{{% /admonition %}}
+
+If [bloom filters][] are enabled, you can write LogQL queries using [structured metadata][] to benefit from query acceleration.
+
+## Prerequisites
+
+* [Bloom filters][bloom filters] must be enabled.
+* Logs must include [structured metadata][].
+
+### Query blooms
+
+Queries will be accelerated for any [label filter expression][] that satisfies _all_ of the following criteria:
+
+* The label filter expression uses **string equality**, such as `| key="value"`.
+* The label filter expression is querying for structured metadata and not a stream label.
+* The label filter expression is placed before any [parser expression][], [labels format expression][], [drop labels expression][], or [keep labels expression][].
+
+To take full advantage of query acceleration with blooms, ensure that filtering structured metadata is done before any parse expression:
+
+```logql
+{cluster="prod"} | logfmt | json | detected_level="error" # NOT ACCELERATED: structured metadata filter is after a parse stage
+{cluster="prod"} | detected_level="error" | logfmt | json # ACCELERATED: structured metadata filter is before any parse stage
+```
+
+[bloom filters]: https://grafana.com/docs/loki//operations/bloom-filters/
+[structured metadata]: https://grafana.com/docs/loki//get-started/labels/structured-metadata
+[label filter expression]: https://grafana.com/docs/loki//query/log_queries/#label-filter-expression
+[parser expression]: https://grafana.com/docs/loki//query/log_queries/#parser-expression
+[labels format expression]: https://grafana.com/docs/loki//query/log_queries/#labels-format-expression
+[drop labels expression]: https://grafana.com/docs/loki//query/log_queries/#drop-labels-expression
+[keep labels expression]: https://grafana.com/docs/loki//query/log_queries/#keep-labels-expression
diff --git a/docs/sources/release-notes/v3-1.md b/docs/sources/release-notes/v3-1.md
index ec63aa2636d51..f623caad2b46c 100644
--- a/docs/sources/release-notes/v3-1.md
+++ b/docs/sources/release-notes/v3-1.md
@@ -79,10 +79,17 @@ Out of an abundance of caution, we advise that users with Loki or Grafana Enterp
## Bug fixes
-## 3.1.1 (2024-08-08)
+### 3.1.2 (2024-10-17)
-- **deps:** Bumped dependencies versions to resolve CVEs ([#13789](https://github.com/grafana/loki/issues/13789)) ([34206cd](https://github.com/grafana/loki/commit/34206cd2d6290566034710ae6c2d08af8804bc91)).
+- **config:** Copy Alibaba and IBM object storage configuration from common ([#14316](https://github.com/grafana/loki/issues/14316)) ([7184d45](https://github.com/grafana/loki/commit/7184d45d8e080874feea8bfd223dedf5f20d3836)).
+- **docker-compose:** upgrade loki and grafana production image tags to 3.1.1 ([#14025](https://github.com/grafana/loki/issues/14025)) ([36fe29e](https://github.com/grafana/loki/commit/36fe29eb334d8300265ca437c0acb423a01c5041)).
+- **logql:** updated JSONExpressionParser not to unescape extracted values if it is JSON object. (backport release-3.1.x) ([#14503](https://github.com/grafana/loki/issues/14503)) ([759f9c8](https://github.com/grafana/loki/commit/759f9c8525227bb1272771a40429d12e015874d9)).
+- **promtail:** Revert build image to Debian Bullseye to fix libc version issue in Promtail ([#14387](https://github.com/grafana/loki/issues/14387)) ([05b6a65](https://github.com/grafana/loki/commit/05b6a65f8bf00b880f17465553b1adaf0cf56d60)).
+- **storage:** have GetObject check for canceled context (backport release-3.1.x) ([#14421](https://github.com/grafana/loki/issues/14421)) ([f3d69ff](https://github.com/grafana/loki/commit/f3d69ffa960c91c0239436a32bb0aa578c0f022a)).
+
+### 3.1.1 (2024-08-08)
+- **deps:** Bumped dependencies versions to resolve CVEs ([#13789](https://github.com/grafana/loki/issues/13789)) ([34206cd](https://github.com/grafana/loki/commit/34206cd2d6290566034710ae6c2d08af8804bc91)).
### 3.1.0 (2024-07-02)
@@ -185,4 +192,3 @@ With this fix, if the `max_line_size` config in Promtail has a value, the docker
- Use to the proper config names in warning messages ([#12114](https://github.com/grafana/loki/issues/12114)) ([4a05964](https://github.com/grafana/loki/commit/4a05964d5520d46d149f2a4e4709eee36c7fb418)).
- **workflows:** Use an intermediate env variable in GitHub workflow ([#12905](https://github.com/grafana/loki/issues/12905)) ([772616c](https://github.com/grafana/loki/commit/772616cd8f5cbac70374dd4a53f1714fb49a7a3b)).
- **workflows:** Don't run metric collector on forks ([#12687](https://github.com/grafana/loki/issues/12687)) ([7253444](https://github.com/grafana/loki/commit/72534449a07cd9f410973f2d01772024e8e4b7ba)).
-
diff --git a/docs/sources/release-notes/v3-2.md b/docs/sources/release-notes/v3-2.md
index 94655ecbc690b..b18b38ab7bb85 100644
--- a/docs/sources/release-notes/v3-2.md
+++ b/docs/sources/release-notes/v3-2.md
@@ -73,6 +73,13 @@ Out of an abundance of caution, we advise that users with Loki or Grafana Enterp
## Bug fixes
+### 3.2.1 (2024-10-17)
+
+- **config:** Copy Alibaba and IBM object storage configuration from common ([#14315](https://github.com/grafana/loki/issues/14315)) ([32a9bc0](https://github.com/grafana/loki/commit/32a9bc0ca852bdc692c2ccebbae448856e191953)).
+- **logql:** updated JSONExpressionParser not to unescape extracted values if it is JSON object. (backport release-3.2.x) ([#14502](https://github.com/grafana/loki/issues/14502)) ([e9bbaf3](https://github.com/grafana/loki/commit/e9bbaf3f20ec8f303e977b8e6752152b11cd75b9)).
+- **promtail:** Revert build image to Debian Bullseye to fix libc version issue in Promtail ([#14386](https://github.com/grafana/loki/issues/14386)) ([1e913df](https://github.com/grafana/loki/commit/1e913dfc36e0c0aba726c850fd2af975b2a2bbdc)).
+- **storage:** have GetObject check for canceled context (backport release-3.2.x) ([#14422](https://github.com/grafana/loki/issues/14422)) ([1b7dd95](https://github.com/grafana/loki/commit/1b7dd95a6c041577c9710c53ae7ad3c93bad771e)).
+
### 3.2.0 (2024-09-19)
- **blooms:** Cleanup temp blockdir in bloom compactor ([#13622](https://github.com/grafana/loki/issues/13622)) ([64215e1](https://github.com/grafana/loki/commit/64215e18495b12e6d5565eba6fe54bc381ac7189)).
diff --git a/docs/sources/send-data/fluentbit/_index.md b/docs/sources/send-data/fluentbit/_index.md
index ea2af6a4ac4b7..5a6884efd5dbc 100644
--- a/docs/sources/send-data/fluentbit/_index.md
+++ b/docs/sources/send-data/fluentbit/_index.md
@@ -1,282 +1,24 @@
---
-title: Fluent Bit client
+title: Fluent Bit
menuTitle: Fluent Bit
description: Provides instructions for how to install, configure, and use the Fluent Bit client to send logs to Loki.
aliases:
- ../clients/fluentbit/
weight: 500
---
-# Fluent Bit client
+# Fluent Bit
-[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the Grafana Fluent Bit Plugin described here or with the [Fluent-bit Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki.
-This plugin has more configuration options compared to the built-in Fluent Bit Loki plugin.
-You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
+[Fluent Bit](https://fluentbit.io/) is a fast, lightweight logs and metrics agent. It is a CNCF graduated sub-project under the umbrella of Fluentd. Fluent Bit is licensed under the terms of the Apache License v2.0.
-{{< youtube id="s43IBSVyTpQ" >}}
+When using Fluent Bit to ship logs to Loki, you can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
-## Usage
+There are two Fluent Bit plugins for Loki:
-### Docker
+1. The integrated `loki` [plugin](https://grafana.com/docs/loki//send-data/fluentbit/fluent-bit-plugin/), which is officially maintained by the Fluent Bit project.
+2. The `grafana-loki` [plugin](https://grafana.com/docs/loki//send-data/fluentbit/community-plugin/), an alternative community plugin by Grafana Labs.
-You can run a Fluent Bit container with Loki output plugin pre-installed using our [Docker Hub](https://hub.docker.com/r/grafana/fluent-bit-plugin-loki) image:
+We recommend using the `loki` plugin as this provides the most complete feature set and is actively maintained by the Fluent Bit project.
-```bash
-docker run -v /var/log:/var/log \
- -e LOG_PATH="/var/log/*.log" -e LOKI_URL="http://localhost:3100/loki/api/v1/push" \
- grafana/fluent-bit-plugin-loki:latest
-```
+## Tutorial
-Or, an alternative is to run the fluent-bit container using [Docker Hub](https://hub.docker.com/r/fluent/fluent-bit) image:
-
-### Docker Container Logs
-
-To ship logs from Docker containers to Grafana Cloud using Fluent Bit, you can use the Fluent Bit Docker image and configure it to forward logs directly to Grafana Cloud's Loki. Below is a step-by-step guide on setting up Fluent Bit for this purpose.
-
-#### Prerequisites
-
-- Docker is installed on your machine.
-- You have a Grafana Cloud account with access to Loki.
-
-#### Configuration
-
-1. Create a Fluent Bit configuration file named `fluent-bit.conf` with the following content, which defines the input from Docker container logs and sets up the output to send logs to your Grafana Cloud Loki instance:
-
- ```ini
- [SERVICE]
- Flush 1
- Log_Level info
-
- [INPUT]
- Name tail
- Path /var/lib/docker/containers/*/*.log
- Parser docker
- Tag docker.*
-
- [OUTPUT]
- Name loki
- Match *
- Host logs-prod-006.grafana.net
- Port 443
- TLS On
- TLS.Verify On
- HTTP_User 478625
- HTTP_Passwd YOUR_GRAFANA_CLOUD_API_KEY
- Labels job=fluentbit
-
-### Kubernetes
-
-You can run Fluent Bit as a [Daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to collect all your Kubernetes workload logs.
-
-To do so you can use the [Fluent Bit helm chart](https://github.com/fluent/helm-charts) with the following `values.yaml` changing the value of `FLUENT_LOKI_URL`:
-
-```yaml
-image:
- # Here we use the Docker image which has the plugin installed
- repository: grafana/fluent-bit-plugin-loki
- tag: main-e2ed1c0
-
-args:
- - "-e"
- - "/fluent-bit/bin/out_grafana_loki.so"
- - --workdir=/fluent-bit/etc
- - --config=/fluent-bit/etc/conf/fluent-bit.conf
-
-env:
- # Note that for security reasons you should fetch the credentials through a Kubernetes Secret https://kubernetes.io/docs/concepts/configuration/secret/ . You may use the envFrom for this.
- - name: FLUENT_LOKI_URL
- value: https://user:pass@your-loki.endpoint/loki/api/v1/push
-
-config:
- inputs: |
- [INPUT]
- Name tail
- Tag kube.*
- Path /var/log/containers/*.log
- # Be aware that local clusters like docker-desktop or kind use the docker log format and not the cri (https://docs.fluentbit.io/manual/installation/kubernetes#container-runtime-interface-cri-parser)
- multiline.parser docker, cri
- Mem_Buf_Limit 5MB
- Skip_Long_Lines On
-
- outputs: |
- [Output]
- Name grafana-loki
- Match kube.*
- Url ${FLUENT_LOKI_URL}
- Labels {job="fluent-bit"}
- LabelKeys level,app # this sets the values for actual Loki streams and the other labels are converted to structured_metadata https://grafana.com/docs/loki//get-started/labels/structured-metadata/
- BatchWait 1
- BatchSize 1001024
- LineFormat json
- LogLevel info
- AutoKubernetesLabels true
-```
-
-```bash
-helm repo add fluent https://fluent.github.io/helm-charts
-helm repo update
-helm install fluent-bit fluent/fluent-bit -f values.yaml
-```
-
-By default it will collect all containers logs and extract labels from Kubernetes API (`container_name`, `namespace`, etc..).
-
-If you also want to host your Loki instance inside the cluster install the [official Loki helm chart](https://grafana.com/docs/loki//setup/install/helm/).
-
-### AWS Elastic Container Service (ECS)
-
-You can use fluent-bit Loki Docker image as a Firelens log router in AWS ECS.
-For more information about this see our [AWS documentation]({{< relref "../promtail/cloud/ecs" >}})
-
-### Local
-
-First, you need to follow the [instructions](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/README.md) in order to build the plugin dynamic library.
-
-Assuming you have Fluent Bit installed in your `$PATH`, you can run the plugin using:
-
-```bash
-fluent-bit -e /path/to/built/out_grafana_loki.so -c fluent-bit.conf
-```
-
-You can also adapt your plugins.conf, removing the need to change the command line options:
-
-```conf
-[PLUGINS]
- Path /path/to/built/out_grafana_loki.so
-```
-
-## Configuration Options
-
-| Key | Description | Default |
-|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
-| Url | Url of Loki server API endpoint. | http://localhost:3100/loki/api/v1/push |
-| TenantID | The tenant ID used by default to push logs to Loki. If omitted or empty it assumes Loki is running in single-tenant mode and no `X-Scope-OrgID` header is sent. | "" |
-| BatchWait | Time to wait before send a log batch to Loki, full or not. | 1s |
-| BatchSize | Log batch size to send a log batch to Loki (unit: Bytes). | 10 KiB (10 * 1024 Bytes) |
-| Timeout | Maximum time to wait for Loki server to respond to a request. | 10s |
-| MinBackoff | Initial backoff time between retries. | 500ms |
-| MaxBackoff | Maximum backoff time between retries. | 5m |
-| MaxRetries | Maximum number of retries when sending batches. Setting it to `0` will retry indefinitely. | 10 |
-| Labels | labels for API requests. | {job="fluent-bit"} |
-| LogLevel | LogLevel for plugin logger. | "info" |
-| RemoveKeys | Specify removing keys. | none |
-| AutoKubernetesLabels | If set to true, it will add all Kubernetes labels to Loki labels | false |
-| LabelKeys | Comma separated list of keys to use as stream labels. All other keys will be placed into the log line. LabelKeys is deactivated when using `LabelMapPath` label mapping configuration. | none |
-| LineFormat | Format to use when flattening the record to a log line. Valid values are "json" or "key_value". If set to "json" the log line sent to Loki will be the fluentd record (excluding any keys extracted out as labels) dumped as json. If set to "key_value", the log line will be each item in the record concatenated together (separated by a single space) in the format =. | json |
-| DropSingleKey | If set to true and after extracting label_keys a record only has a single key remaining, the log line sent to Loki will just be the value of the record key. | true |
-| LabelMapPath | Path to a json file defining how to transform nested records. | none |
-| Buffer | Enable buffering mechanism | false |
-| BufferType | Specify the buffering mechanism to use (currently only dque is implemented). | dque |
-| DqueDir | Path to the directory for queued logs | /tmp/flb-storage/loki |
-| DqueSegmentSize | Segment size in terms of number of records per segment | 500 |
-| DqueSync | Whether to fsync each queue change. Specify no fsync with "normal", and fsync with "full". | "normal" |
-| DqueName | Queue name, must be uniq per output | dque |
-
-### Labels
-
-Labels are used to [query logs]({{< relref "../../query" >}}) `{container_name="nginx", cluster="us-west1"}`, they are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed consequently you should be cautious when choosing them (high cardinality label values can have performance drastic impact).
-
-You can use `Labels`, `RemoveKeys` , `LabelKeys` and `LabelMapPath` to how the output plugin will perform labels extraction.
-
-### AutoKubernetesLabels
-
-If set to true, it will add all Kubernetes labels to Loki labels automatically and ignore parameters `LabelKeys`, LabelMapPath.
-
-### LabelMapPath
-
-When using the `Parser` and `Filter` plugins Fluent Bit can extract and add data to the current record/log data. While Loki labels are key value pair, record data can be nested structures.
-You can pass a JSON file that defines how to extract labels from each record. Each json key from the file will be matched with the log record to find label values. Values from the configuration are used as label names.
-
-Considering the record below :
-
-```json
-{
- "kubernetes": {
- "container_name": "promtail",
- "pod_name": "promtail-xxx",
- "namespace_name": "prod",
- "labels" : {
- "team": "x-men"
- }
- },
- "HOSTNAME": "docker-desktop",
- "log" : "a log line",
- "time": "20190926T152206Z"
-}
-```
-
-and a LabelMap file as follow :
-
-```json
-{
- "kubernetes": {
- "container_name": "container",
- "pod_name": "pod",
- "namespace_name": "namespace",
- "labels" : {
- "team": "team"
- }
- }
-}
-```
-
-The labels extracted will be `{team="x-men", container="promtail", pod="promtail-xxx", namespace="prod"}`.
-
-If you don't want the `kubernetes` and `HOSTNAME` fields to appear in the log line you can use the `RemoveKeys` configuration field. (e.g. `RemoveKeys kubernetes,HOSTNAME`).
-
-### Buffering
-
-Buffering refers to the ability to store the records somewhere, and while they are processed and delivered, still be able to store more. The Loki output plugin can be blocked by the Loki client because of its design:
-
-- If the BatchSize is over the limit, the output plugin pauses receiving new records until the pending batch is successfully sent to the server
-- If the Loki server is unreachable (retry 429s, 500s and connection-level errors), the output plugin blocks new records until the Loki server is available again, and the pending batch is successfully sent to the server or as long as the maximum number of attempts has been reached within configured back-off mechanism
-
-The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs:
-
-- Configure Loki to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes).
-
-- Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering:
-
- ```properties
- [Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- Buffer true
- DqueSegmentSize 8096
- DqueDir /tmp/flb-storage/buffer
- DqueName loki.0
- ```
-
-### Configuration examples
-
-To configure the Loki output plugin add this section to fluent-bit.conf
-
-```properties
-[Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- BatchWait 1s
- BatchSize 30720
- # (30KiB)
- Labels {test="fluent-bit-go", lang="Golang"}
- RemoveKeys key1,key2
- LabelKeys key3,key4
- LineFormat key_value
-```
-
-```properties
-[Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- BatchWait 1s
- BatchSize 30720 # (30KiB)
- AutoKubernetesLabels true
- RemoveKeys key1,key2
-```
-
-A full [example configuration file](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/fluent-bit.conf) is also available in the Loki repository.
-
-### Running multiple plugin instances
-
-You can run multiple plugin instances in the same fluent-bit process, for example if you want to push to different Loki servers or route logs into different Loki tenant IDs. To do so, add additional `[Output]` sections.
+To get started with the `loki` plugin, follow the [Sending logs to Loki using Fluent Bit tutorial](https://grafana.com/docs/loki//send-data/fluentbit/fluent-bit-loki-tutorial/).
diff --git a/docs/sources/send-data/fluentbit/community-plugin.md b/docs/sources/send-data/fluentbit/community-plugin.md
new file mode 100644
index 0000000000000..60dd5fef74a3f
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/community-plugin.md
@@ -0,0 +1,281 @@
+---
+title: Fluent Bit community plugin
+menuTitle: Fluent Bit Community Plugin
+description: Provides instructions for how to install, configure, and use the Fluent Bit Community plugin to send logs to Loki.
+aliases:
+- ../clients/fluentbit/
+weight: 500
+---
+# Fluent Bit community plugin
+
+{{< admonition type="warning" >}}
+
+We recommend using the official [Fluent Bit Loki plugin](https://grafana.com/docs/loki//send-data/fluentbit/fluent-bit-plugin/). The official plugin is more feature-rich and has better support for features such as structured metadata. The community plugin is still available for use, but it's no longer actively maintained.
+
+{{< /admonition >}}
+
+The Fluent Bit community plugin by Grafana Labs (`grafana-loki`) provided an alternative way to send logs to Loki. Although very similar to the [official plugin](https://grafana.com/docs/loki//send-data/fluentbit/fluent-bit-plugin/) there are some differences in the configuration options. This page provides instructions for how to install, configure, and use the Fluent Bit community plugin to send logs to Loki. Although the plugin is no longer actively maintained, this documentation is still available for reference.
+
+{{< youtube id="s43IBSVyTpQ" >}}
+
+## Usage
+
+### Docker
+
+You can run a Fluent Bit container with Loki output plugin pre-installed using our [Docker Hub](https://hub.docker.com/r/grafana/fluent-bit-plugin-loki) image:
+
+```bash
+docker run -v /var/log:/var/log \
+ -e LOG_PATH="/var/log/*.log" -e LOKI_URL="http://localhost:3100/loki/api/v1/push" \
+ grafana/fluent-bit-plugin-loki:latest
+```
+
+Alternatively, you can run the Fluent Bit container using the official [Docker Hub](https://hub.docker.com/r/fluent/fluent-bit) image, as described in the following section.
+
+### Docker container logs
+
+To ship logs from Docker containers to Grafana Cloud using Fluent Bit, you can use the Fluent Bit Docker image and configure it to forward logs directly to Grafana Loki. Below is a step-by-step guide on setting up Fluent Bit for this purpose.
+
+#### Prerequisites
+
+- Docker is installed on your machine.
+- Running instance of Loki OSS.
+
+#### Configuration
+
+1. Create a Fluent Bit configuration file named `fluent-bit.conf` with the following content, which defines the input from Docker container logs and sets up the output to send logs to your Grafana Cloud Loki instance:
+
+ ```ini
+ [SERVICE]
+ Flush 1
+ Log_Level info
+
+ [INPUT]
+ Name tail
+ Path /var/lib/docker/containers/*/*.log
+ Parser docker
+ Tag docker.*
+
+ [OUTPUT]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ Labels {job="fluentbit"}
+ ```
+
+### Kubernetes
+
+You can run Fluent Bit as a [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to collect all your Kubernetes workload logs.
+
+To do so you can use the [Fluent Bit Helm chart](https://github.com/fluent/helm-charts) with the following `values.yaml` changing the value of `FLUENT_LOKI_URL`:
+
+```yaml
+image:
+ # Here we use the Docker image which has the plugin installed
+ repository: grafana/fluent-bit-plugin-loki
+ tag: main-e2ed1c0
+
+args:
+ - "-e"
+ - "/fluent-bit/bin/out_grafana_loki.so"
+ - --workdir=/fluent-bit/etc
+ - --config=/fluent-bit/etc/conf/fluent-bit.conf
+
+env:
+ # Note that for security reasons you should fetch the credentials through a Kubernetes Secret https://kubernetes.io/docs/concepts/configuration/secret/ . You may use the envFrom for this.
+ - name: FLUENT_LOKI_URL
+ value: https://user:pass@your-loki.endpoint/loki/api/v1/push
+
+config:
+ inputs: |
+ [INPUT]
+ Name tail
+ Tag kube.*
+ Path /var/log/containers/*.log
+ # Be aware that local clusters like docker-desktop or kind use the docker log format and not the cri (https://docs.fluentbit.io/manual/installation/kubernetes#container-runtime-interface-cri-parser)
+ multiline.parser docker, cri
+ Mem_Buf_Limit 5MB
+ Skip_Long_Lines On
+
+ outputs: |
+ [Output]
+ Name grafana-loki
+ Match kube.*
+ Url ${FLUENT_LOKI_URL}
+ Labels {job="fluent-bit"}
+ LabelKeys level,app # this sets the values for actual Loki streams and the other labels are converted to structured_metadata https://grafana.com/docs/loki//get-started/labels/structured-metadata/
+ BatchWait 1
+ BatchSize 1001024
+ LineFormat json
+ LogLevel info
+ AutoKubernetesLabels true
+```
+
+```bash
+helm repo add fluent https://fluent.github.io/helm-charts
+helm repo update
+helm install fluent-bit fluent/fluent-bit -f values.yaml
+```
+
+By default it will collect all containers logs and extract labels from Kubernetes API (`container_name`, `namespace`, etc.).
+
+If you also want to host your Loki instance inside the cluster install the [official Loki Helm chart](https://grafana.com/docs/loki//setup/install/helm/).
+
+### AWS Elastic Container Service (ECS)
+
+You can use the fluent-bit Loki Docker image as a Firelens log router in AWS ECS.
+For more information about this see our [AWS documentation](https://grafana.com/docs/loki//send-data/promtail/cloud/ecs/).
+
+### Local
+
+First, you need to follow the [instructions](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/README.md) in order to build the plugin dynamic library.
+
+Assuming you have Fluent Bit installed in your `$PATH` you can run the plugin using:
+
+```bash
+fluent-bit -e /path/to/built/out_grafana_loki.so -c fluent-bit.conf
+```
+
+You can also adapt your plugins.conf, removing the need to change the command line options:
+
+```conf
+[PLUGINS]
+ Path /path/to/built/out_grafana_loki.so
+```
+
+## Configuration options
+
+| Key | Description | Default |
+|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
+| Url | Url of Loki server API endpoint. | http://localhost:3100/loki/api/v1/push |
+| TenantID | The tenant ID used by default to push logs to Loki. If omitted or empty it assumes Loki is running in single-tenant mode and no `X-Scope-OrgID` header is sent. | "" |
+| BatchWait | Time to wait before sending a log batch to Loki, full or not. | 1s |
+| BatchSize | Log batch size to send a log batch to Loki (unit: Bytes). | 10 KiB (10 * 1024 Bytes) |
+| Timeout | Maximum time to wait for Loki server to respond to a request. | 10s |
+| MinBackoff | Initial backoff time between retries. | 500ms |
+| MaxBackoff | Maximum backoff time between retries. | 5m |
+| MaxRetries | Maximum number of retries when sending batches. Setting it to `0` will retry indefinitely. | 10 |
+| Labels | Labels for API requests. | {job="fluent-bit"} |
+| LogLevel | LogLevel for plugin logger. | `info` |
+| RemoveKeys | Specify removing keys. | none |
+| AutoKubernetesLabels | If set to `true`, it will add all Kubernetes labels to Loki labels. | false |
+| LabelKeys | Comma separated list of keys to use as stream labels. All other keys will be placed into the log line. LabelKeys is deactivated when using `LabelMapPath` label mapping configuration. | none |
+| LineFormat | Format to use when flattening the record to a log line. Valid values are `json` or `key_value`. If set to `json` the log line sent to Loki will be the fluentd record (excluding any keys extracted out as labels) dumped as json. If set to `key_value`, the log line will be each item in the record concatenated together (separated by a single space) in the format <key>=<value>. | json |
+| DropSingleKey | If set to true and after extracting label_keys a record only has a single key remaining, the log line sent to Loki will just be the value of the record key. | true |
+| LabelMapPath | Path to a json file defining how to transform nested records. | none |
+| Buffer | Enable buffering mechanism. | false |
+| BufferType | Specify the buffering mechanism to use (currently only `dque` is implemented). | dque |
+| DqueDir | Path to the directory for queued logs. | /tmp/flb-storage/loki |
+| DqueSegmentSize | Segment size in terms of number of records per segment. | 500 |
+| DqueSync | Whether to fsync each queue change. Specify no fsync with `normal`, and fsync with `full`. | `normal` |
+| DqueName | Queue name, must be unique per output. | dque |
+
+### Labels
+
+Labels, for example `{container_name="nginx", cluster="us-west1"}`, are used to [query logs](https://grafana.com/docs/loki//query/). Labels are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed, so you should be cautious when choosing them. High cardinality label values can have drastic impact on query performance.
+
+You can use the config parameters `Labels`, `RemoveKeys` , `LabelKeys` and `LabelMapPath` to instruct the output plugin how to perform labels extraction from your log entries or to add static labels to all log entries.
+
+### AutoKubernetesLabels
+
+If set to `true`, `AutoKubernetesLabels` will add all Kubernetes labels to Loki labels automatically and ignore parameters `LabelKeys`, `LabelMapPath`.
+
+### LabelMapPath
+
+When using the `Parser` and `Filter` plugins Fluent Bit can extract and add data to the current record/log data. While Loki labels are key value pairs, record data can be nested structures.
+You can pass a JSON file that defines how to extract labels from each record. Each JSON key from the file will be matched with the log record to find label values. Values from the configuration are used as label names.
+
+Consider the record below:
+
+```json
+{
+ "kubernetes": {
+ "container_name": "promtail",
+ "pod_name": "promtail-xxx",
+ "namespace_name": "prod",
+ "labels" : {
+ "team": "x-men"
+ }
+ },
+ "HOSTNAME": "docker-desktop",
+ "log" : "a log line",
+ "time": "20190926T152206Z"
+}
+```
+
+and a LabelMap file as follows:
+
+```json
+{
+ "kubernetes": {
+ "container_name": "container",
+ "pod_name": "pod",
+ "namespace_name": "namespace",
+ "labels" : {
+ "team": "team"
+ }
+ }
+}
+```
+
+The labels extracted will be `{team="x-men", container="promtail", pod="promtail-xxx", namespace="prod"}`.
+
+If you don't want the `kubernetes` and `HOSTNAME` fields to appear in the log line you can use the `RemoveKeys` configuration field. For example, `RemoveKeys kubernetes,HOSTNAME`.
+
+### Buffering
+
+Buffering refers to the ability to store the records somewhere, and while they are processed and delivered, still be able to continue storing more records. The Loki output plugin can be blocked by the Loki client because of its design:
+
+- If the BatchSize is over the limit, the output plugin pauses receiving new records until the pending batch is successfully sent to the server
+- If the Loki server is unreachable (retry 429s, 500s and connection-level errors), the output plugin blocks new records until the Loki server is available again, and the pending batch is successfully sent to the server or as long as the maximum number of attempts has been reached within configured back-off mechanism
+
+The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs:
+
+- Configure Loki to [accept out-of-order writes](https://grafana.com/docs/loki//configure/#accept-out-of-order-writes).
+
+- Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering:
+
+ ```properties
+ [Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ Buffer true
+ DqueSegmentSize 8096
+ DqueDir /tmp/flb-storage/buffer
+ DqueName loki.0
+ ```
+
+### Configuration examples
+
+To configure the Loki output plugin add this section to your fluent-bit.conf file.
+
+```properties
+[Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ BatchWait 1s
+ BatchSize 30720
+ # (30KiB)
+ Labels {test="fluent-bit-go", lang="Golang"}
+ RemoveKeys key1,key2
+ LabelKeys key3,key4
+ LineFormat key_value
+```
+
+```properties
+[Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ BatchWait 1s
+ BatchSize 30720 # (30KiB)
+ AutoKubernetesLabels true
+ RemoveKeys key1,key2
+```
+
+A full [example configuration file](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/fluent-bit.conf) is also available in the Loki repository.
+
+### Running multiple plugin instances
+
+You can run multiple plugin instances in the same fluent-bit process, for example if you want to push to different Loki servers or route logs into different Loki tenant IDs. To do so, add additional `[Output]` sections.
diff --git a/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md b/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md
new file mode 100644
index 0000000000000..67e2a583e0445
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md
@@ -0,0 +1,268 @@
+---
+title: Sending logs to Loki using Fluent Bit tutorial
+menuTitle: Fluent Bit tutorial
+description: Learn how to send logs to Loki using the official Fluent Bit Loki output plugin.
+weight: 250
+killercoda:
+ title: Sending logs to Loki using Fluent Bit tutorial
+ description: Learn how to send logs to Loki using the official Fluent Bit Loki output plugin.
+ preprocessing:
+ substitutions:
+ - regexp: loki-fundamentals-fluent-bit-1
+ replacement: loki-fundamentals_fluent-bit_1
+ - regexp: docker compose
+ replacement: docker-compose
+ backend:
+ imageid: ubuntu
+---
+
+
+
+# Sending logs to Loki using Fluent Bit tutorial
+
+In this tutorial, you will learn how to send logs to Loki using Fluent Bit. Fluent Bit is a lightweight and fast log processor and forwarder that can collect, process, and deliver logs to various destinations. We will use the official Fluent Bit Loki output plugin to send logs to Loki.
+
+
+
+
+## Dependencies
+
+Before you begin, ensure you have the following to run the demo:
+
+- Docker
+- Docker Compose
+
+{{< admonition type="tip" >}}
+Alternatively, you can try out this example in our interactive learning environment: [Sending logs to Loki using Fluent Bit tutorial](https://killercoda.com/grafana-labs/course/loki/fluentbit-loki-tutorial).
+
+It's a fully configured environment with all the dependencies already installed.
+
+
+
+Provide feedback, report bugs, and raise issues in the [Grafana Killercoda repository](https://github.com/grafana/killercoda).
+{{< /admonition >}}
+
+
+
+## Scenario
+
+In this scenario, we have a microservices application called the Carnivorous Greenhouse. This application consists of the following services:
+
+- **User Service:** Manages user data and authentication for the application. Such as creating users and logging in.
+- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created.
+- **Simulation Service:** Generates sensor data for each plant.
+- **Websocket Service:** Manages the websocket connections for the application.
+- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs.
+- **Main App:** The main application that ties all the services together.
+- **Database:** A database that stores user and plant data.
+
+Each service has been instrumented with the Fluent Bit logging framework to generate logs. If you would like to learn more about how the Carnivorous Greenhouse application was instrumented with Fluent Bit, refer to the [Carnivorous Greenhouse repository](https://github.com/grafana/loki-fundamentals/blob/fluentbit-official/greenhouse/loggingfw.py).
+
+
+
+
+
+## Step 1: Environment setup
+
+In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose.
+
+1. To get started, clone the repository that contains our demo application:
+
+ ```bash
+ git clone -b fluentbit-official https://github.com/grafana/loki-fundamentals.git
+ ```
+
+1. Next we will spin up our observability stack using Docker Compose:
+
+ ```bash
+ docker compose -f loki-fundamentals/docker-compose.yml up -d
+ ```
+
+ This will spin up the following services:
+ ```console
+ ✔ Container loki-fundamentals-grafana-1 Started
+ ✔ Container loki-fundamentals-loki-1 Started
+ ✔ Container loki-fundamentals-fluent-bit-1 Started
+ ```
+Once we have finished configuring the Fluent Bit agent and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000](http://localhost:3000)
+
+
+
+
+## Step 2: Configure Fluent Bit to send logs to Loki
+
+To configure Fluent Bit to receive logs from our application, we need to provide a configuration file. This configuration file will define the components and their relationships. We will build the entire observability pipeline within this configuration file.
+
+### Open your code editor and locate the `fluent-bit.conf` file
+
+Fluent Bit requires a configuration file to define the components and their relationships. The configuration file is written using Fluent Bit configuration syntax. We will build the entire observability pipeline within this configuration file. To start, we will open the `fluent-bit.conf` file in the code editor:
+
+{{< docs/ignore >}}
+> Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.
+1. Expand the `loki-fundamentals` directory in the file explorer of the `Editor` tab.
+1. Locate the `fluent-bit.conf` file in the top level directory, `loki-fundamentals`.
+1. Click on the `fluent-bit.conf` file to open it in the code editor.
+{{< /docs/ignore >}}
+
+
+1. Open the `loki-fundamentals` directory in a code editor of your choice.
+1. Locate the `fluent-bit.conf` file in the `loki-fundamentals` directory (Top level directory).
+1. Click on the `fluent-bit.conf` file to open it in the code editor.
+
+
+You will copy all of the configuration snippets into the `fluent-bit.conf` file.
+
+### Receiving Fluent Bit protocol logs
+
+The first step is to configure Fluent Bit to receive logs from the Carnivorous Greenhouse application. Since the application is instrumented with Fluent Bit logging framework, it will send logs using the forward protocol (unique to Fluent Bit). We will use the `forward` input plugin to receive logs from the application.
+
+Now add the following configuration to the `fluent-bit.conf` file:
+```conf
+[INPUT]
+ Name forward
+ Listen 0.0.0.0
+ Port 24224
+```
+
+In this configuration:
+- `Name`: The name of the input plugin. In this case, we are using the `forward` input plugin.
+- `Listen`: The IP address to listen on. In this case, we are listening on all IP addresses.
+- `Port`: The port to listen on. In this case, we are listening on port `24224`.
+
+For more information on the `forward` input plugin, see the [Fluent Bit Forward documentation](https://docs.fluentbit.io/manual/pipeline/inputs/forward).
+
+
+
+### Export logs to Loki using the official Loki output plugin
+
+Lastly, we will configure Fluent Bit to export logs to Loki using the official Loki output plugin. The Loki output plugin allows you to send logs or events to a Loki service. It supports data enrichment with Kubernetes labels, custom label keys, and structured metadata.
+
+Add the following configuration to the `fluent-bit.conf` file:
+```conf
+[OUTPUT]
+ name loki
+ match service.**
+ host loki
+ port 3100
+ labels agent=fluent-bit
+ label_map_path /fluent-bit/etc/conf/logmap.json
+```
+
+In this configuration:
+- `name`: The name of the output plugin. In this case, we are using the `loki` output plugin.
+- `match`: The tag to match. In this case, we are matching all logs with the tag `service.**`.
+- `host`: The hostname of the Loki service. In this case, we are using the hostname `loki`.
+- `port`: The port of the Loki service. In this case, we are using port `3100`.
+- `labels`: Additional labels to add to the logs. In this case, we are adding the label `agent=fluent-bit`.
+- `label_map_path`: The path to the label map file. In this case, we are using the file `logmap.json`.
+
+For more information on the `loki` output plugin, see the [Fluent Bit Loki documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki).
+
+#### `logmap.json` file
+
+The `logmap.json` file is used to map the log fields to the Loki labels. In this tutorial we have pre-filled the `logmap.json` file with the following configuration:
+```json
+{
+  "service": "service_name",
+  "instance_id": "instance_id"
+}
+```
+This configuration maps the `service` field to the Loki label `service_name` and the `instance_id` field to the Loki label `instance_id`.
+
+
+### Reload the Fluent Bit configuration
+
+After adding the configuration to the `fluent-bit.conf` file, you will need to reload the Fluent Bit configuration. To reload the configuration, run the following command:
+
+```bash
+docker restart loki-fundamentals-fluent-bit-1
+```
+To verify that the configuration has been loaded successfully, you can check the Fluent Bit logs by running the following command:
+
+```bash
+docker logs loki-fundamentals-fluent-bit-1
+```
+
+## Stuck? Need help?
+
+If you get stuck or need help creating the configuration, you can copy and replace the entire `fluent-bit.conf` using the completed configuration file:
+
+```bash
+cp loki-fundamentals/completed/fluent-bit.conf loki-fundamentals/fluent-bit.conf
+docker restart loki-fundamentals-fluent-bit-1
+```
+
+
+
+
+
+## Step 3: Start the Carnivorous Greenhouse
+
+In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command:
+
+{{< admonition type="note" >}}
+This docker-compose file relies on the `loki-fundamentals_loki` Docker network. If you have not started the observability stack, you will need to start it first.
+{{< /admonition >}}
+
+
+{{< docs/ignore >}}
+
+> Note: This docker-compose file relies on the `loki-fundamentals_loki` docker network. If you have not started the observability stack, you will need to start it first.
+
+{{< /docs/ignore >}}
+
+```bash
+docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build
+```
+
+This will start the following services:
+```bash
+ ✔ Container greenhouse-db-1 Started
+ ✔ Container greenhouse-websocket_service-1 Started
+ ✔ Container greenhouse-bug_service-1 Started
+ ✔ Container greenhouse-user_service-1 Started
+ ✔ Container greenhouse-plant_service-1 Started
+ ✔ Container greenhouse-simulation_service-1 Started
+ ✔ Container greenhouse-main_app-1 Started
+```
+
+Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005](http://localhost:5005). Generate some logs by interacting with the application in the following ways:
+
+1. Create a user.
+1. Log in.
+1. Create a few plants to monitor.
+1. Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs.
+
+Finally to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore](http://localhost:3000/a/grafana-lokiexplore-app/explore).
+
+
+
+
+
+
+## Summary
+
+In this tutorial, you learned how to send logs to Loki using Fluent Bit. You configured Fluent Bit to receive logs from the Carnivorous Greenhouse application and export logs to Loki using the official Loki output plugin. Where to next?
+
+{{< docs/ignore >}}
+
+### Back to Docs
+Head back to where you started from to continue with the [Loki documentation](https://grafana.com/docs/loki/latest/send-data/fluentbit/).
+
+{{< /docs/ignore >}}
+
+
+## Further reading
+
+For more information on Fluent Bit, refer to the following resources:
+- [Fluent Bit documentation](https://docs.fluentbit.io/manual/)
+- [Other examples of Fluent Bit configurations](https://grafana.com/docs/loki/latest/send-data/fluentbit/)
+
+## Complete metrics, logs, traces, and profiling example
+
+If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mltp). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana.
+
+The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud.
+
+
+
diff --git a/docs/sources/send-data/fluentbit/fluent-bit-plugin.md b/docs/sources/send-data/fluentbit/fluent-bit-plugin.md
new file mode 100644
index 0000000000000..7d0cca7393704
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/fluent-bit-plugin.md
@@ -0,0 +1,148 @@
+---
+title: Fluent Bit Loki output plugin
+menuTitle: Fluent Bit
+description: Provides instructions for how to install, configure, and use the Fluent Bit client to send logs to Loki.
+aliases:
+- ../clients/fluentbit/
+weight: 500
+---
+# Fluent Bit Loki output plugin
+
+[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the [Fluent-bit Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki.
+
+You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
+
+{{< admonition type="note" >}}
+There are two Fluent Bit plugins for Loki: the officially maintained plugin `loki` and the `grafana-loki` plugin. We recommend using the `loki` plugin described within this page as it's officially maintained by the Fluent Bit project.
+
+For more information, see the [Fluent Bit Loki output plugin documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki). Note that the `grafana-loki` plugin is no longer actively maintained.
+{{< /admonition >}}
+
+## Configuration
+
+All configuration options for the Fluent Bit Loki output plugin are documented in the [Fluent Bit Loki output plugin documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki#configuration-parameters).
+
+Here is a generic example for connecting Fluent Bit to Loki hosted on Grafana Cloud:
+
+```conf
+ [OUTPUT]
+ Name loki
+ Match *
+ Host YourHostname.company.com
+ port 443
+ tls on
+ tls.verify on
+ http_user XXX
+ http_passwd XXX
+```
+
+Replace `Host`, `http_user`, and `http_passwd` with your Grafana Cloud Loki endpoint and credentials.
+
+
+## Usage examples
+
+Here are some examples of how to use Fluent Bit to send logs to Loki.
+
+### Tail Docker logs
+
+Here is an example to run Fluent Bit in a Docker container, collect Docker logs, and send them to a local Loki instance.
+
+```bash
+docker run -v /var/lib/docker/containers:/var/lib/docker/containers fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -i tail -p Path="/var/lib/docker/containers/*/*.log" -p Parser=docker -p Tag="docker.*" -o loki -p host=loki -p port=3100 -p labels="agent=fluent-bit,env=docker"
+```
+
+In this example, we are using the `tail` input plugin to collect Docker logs and the `loki` output plugin to send logs to Loki. Note it is recommended to use a configuration file to define the input and output plugins. The `-p` flag is used to pass configuration parameters to the plugins.
+
+#### Configuration file (Alternative to command line arguments)
+
+Create a configuration file `fluent-bit.conf` with the following content:
+
+```conf
+[INPUT]
+ Name tail
+ Path /var/lib/docker/containers/*/*.log
+ Parser docker
+ Tag docker.*
+
+[OUTPUT]
+ Name loki
+ Match *
+ Host loki
+ Port 3100
+ Labels agent=fluent-bit,env=docker
+```
+
+Run Fluent Bit with the configuration file:
+
+```bash
+docker run -v /var/lib/docker/containers:/var/lib/docker/containers -v $(pwd)/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
+```
+
+### Collect Docker events
+
+Here is an example to run Fluent Bit in a Docker container, collect docker events, and send them to a local Loki instance.
+
+```bash
+docker run -v /var/run/docker.sock:/var/run/docker.sock fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -i docker_events -o loki -p host=loki -p port=3100 -p labels="agent=fluend-bit,env=docker"
+```
+
+In this example, we are using the `docker_events` input plugin to collect Docker events and the `loki` output plugin to send logs to Loki. Note it is recommended to use a configuration file to define the input and output plugins. The `-p` flag is used to pass configuration parameters to the plugins.
+
+#### Configuration file (Alternative to command line arguments)
+
+Create a configuration file `fluent-bit.conf` with the following content:
+
+```conf
+[INPUT]
+ Name docker_events
+
+[OUTPUT]
+ Name loki
+ Match *
+ Host loki
+ Port 3100
+ Labels agent=fluent-bit,env=docker
+```
+
+Run Fluent Bit with the configuration file:
+
+```bash
+docker run -v /var/run/docker.sock:/var/run/docker.sock -v $(pwd)/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
+```
+
+### Collect Kubernetes logs
+
+The recommended way to collect logs from Kubernetes with Fluent Bit is to use the Helm chart provided by the Fluent Bit project. The Helm chart is available at [https://github.com/fluent/helm-charts](https://github.com/fluent/helm-charts).
+
+Here is an example of how to deploy the Fluent Bit Helm chart to collect logs from Kubernetes and send them to Loki:
+
+1. Add the Fluent Bit Helm repository:
+
+ ```bash
+ helm repo add fluent https://fluent.github.io/helm-charts
+ ```
+
+1. Create a `values.yaml` file with the following content:
+
+ ```yaml
+ config:
+ outputs: |
+ [OUTPUT]
+ Name loki
+ Match *
+ Host YourHost.Company.net
+ port 443
+ tls on
+ tls.verify on
+ http_user XXX
+ http_passwd XXX
+ Labels agent=fluent-bit
+ ```
+
+ Note we are only updating the `outputs` section of the Fluent Bit configuration. This is to replace the default output plugin with the Loki output plugin. If you need to update other parts of the Fluent Bit configuration refer to the [Fluent Bit values file reference](https://github.com/fluent/helm-charts/blob/main/charts/fluent-bit/values.yaml).
+
+1. Deploy the Fluent Bit Helm chart:
+
+ ```bash
+ helm install fluent-bit fluent/fluent-bit -f values.yaml
+ ```
+
+## Next steps
+
+- [Sending logs to Loki using Fluent Bit tutorial](https://grafana.com/docs/loki//send-data/fluentbit/fluent-bit-loki-tutorial/)
\ No newline at end of file
diff --git a/docs/sources/setup/install/docker.md b/docs/sources/setup/install/docker.md
index de2006250a7c3..723adfc617a71 100644
--- a/docs/sources/setup/install/docker.md
+++ b/docs/sources/setup/install/docker.md
@@ -113,13 +113,9 @@ Run the following commands in your command line. They work for Windows or Linux
You should see something similar to the following:
```bash
- ✔ Container mydevice-minio-1 Started 0.0s
- ✔ Container mydevice-flog-1 Started 0.0s
- ✔ Container mydevice-write-1 Started 0.0s
- ✔ Container mydevice-read-1 Started 0.0s
- ✔ Container mydevice-gateway-1 Started 0.0s
- ✔ Container mydevice-grafana-1 Started 0.0s
- ✔ Container mydevice-promtail-1 Started 0.0s
+ ✔ Container loki-loki-1 Started 0.0s
+ ✔ Container loki-grafana-1 Started 0.0s
+ ✔ Container loki-promtail-1 Started 0.0s
```
1. Verify that Loki is up and running.
diff --git a/docs/sources/setup/install/helm/install-microservices/_index.md b/docs/sources/setup/install/helm/install-microservices/_index.md
index f641172d6aae5..fea617e7bb9df 100644
--- a/docs/sources/setup/install/helm/install-microservices/_index.md
+++ b/docs/sources/setup/install/helm/install-microservices/_index.md
@@ -21,7 +21,7 @@ The default Helm chart deploys the following components:
- **QueryFrontend component** (2 replicas, maxUnavailable: 1): Manages frontend queries. Up to 1 replica can be unavailable during updates.
- **QueryScheduler component** (2 replicas): Schedules queries.
-It is not recommended to run scalable mode with `filesystem` storage. For the purpose of this guide, we will use MinIO as the object storage to provide a complete example.
+It is not recommended to run microservice mode with `filesystem` storage. For the purpose of this guide, we will use MinIO as the object storage to provide a complete example.
**Prerequisites**
@@ -96,7 +96,9 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
replicas: 2
maxUnavailable: 1
- bloomCompactor:
+ bloomPlanner:
+ replicas: 0
+ bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
@@ -244,7 +246,9 @@ When deploying Loki using S3 Storage **DO NOT** use the default bucket names; `
replicas: 2
maxUnavailable: 1
- bloomCompactor:
+ bloomPlanner:
+ replicas: 0
+ bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
@@ -325,7 +329,9 @@ indexGateway:
replicas: 2
maxUnavailable: 1
-bloomCompactor:
+bloomPlanner:
+ replicas: 0
+bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md
index 00d2498f0bd3e..3bc6a3c0cad71 100644
--- a/docs/sources/setup/install/helm/install-scalable/_index.md
+++ b/docs/sources/setup/install/helm/install-scalable/_index.md
@@ -108,7 +108,9 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
replicas: 0
indexGateway:
replicas: 0
- bloomCompactor:
+ bloomPlanner:
+ replicas: 0
+ bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
@@ -209,7 +211,9 @@ compactor:
replicas: 0
indexGateway:
replicas: 0
-bloomCompactor:
+bloomPlanner:
+ replicas: 0
+bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
@@ -287,7 +291,9 @@ compactor:
replicas: 0
indexGateway:
replicas: 0
-bloomCompactor:
+bloomPlanner:
+ replicas: 0
+bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index f5c4ed1122738..5ab32a5fedd5b 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -53,6 +53,7 @@ This is the generated reference for the Loki Helm Chart values.
"env": [],
"extraArgs": {},
"extraContainers": [],
+ "extraEnvFrom": [],
"extraVolumeMounts": [],
"extraVolumes": [],
"hostAliases": [],
@@ -129,6 +130,15 @@ This is the generated reference for the Loki Helm Chart values.
[]
+
+
+
+ adminApi.extraEnvFrom
+ list
+ Environment variables from secrets or configmaps to add to the admin-api pods
+
+[]
+
@@ -3616,6 +3626,7 @@ false
"env": [],
"extraArgs": {},
"extraContainers": [],
+ "extraEnvFrom": [],
"extraVolumeMounts": [],
"extraVolumes": [],
"hostAliases": [],
@@ -3695,6 +3706,15 @@ false
[]
+
+
+
+ enterpriseGateway.extraEnvFrom
+ list
+ Environment variables from secrets or configmaps to add to the enterprise gateway pods
+
+[]
+
@@ -4213,6 +4233,15 @@ false
{}
+
+
+
+ gateway.nginxConfig.clientMaxBodySize
+ string
+ Allows customizing the `client_max_body_size` directive
+
+"4M"
+
@@ -5780,6 +5809,15 @@ null
[]
+
+
+
+ kubeVersionOverride
+ string
+ Overrides the version used to determine compatibility of resources with the target Kubernetes cluster. This is useful when using `helm template`, because then helm will use the client version of kubectl as the Kubernetes version, which may or may not match your cluster's server version. Example: 'v1.24.4'. Set to null to use the version that helm devises.
+
+null
+
diff --git a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
index 49ba506dc5536..5be15a42b22d8 100644
--- a/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
+++ b/docs/sources/setup/migrate/migrate-to-tsdb/_index.md
@@ -51,7 +51,7 @@ schema_config:
1. This sample configuration uses filesystem as the storage in both the periods. If you want to use a different storage for the TSDB index and chunks, you can specify a different `object_store` in the new period.
-1. Update the schema to v13 which is the recommended version at the time of writing. Please refer to the [configure page](https://grafana.com/docs/loki//configure/#period_config) for the current recommend version.
+1. Update the schema to v13 which is the recommended version at the time of writing. Please refer to the [configure page](https://grafana.com/docs/loki//configure/#period_config) for the current recommended version.
### Configure TSDB shipper
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index 1b68d61828973..a223706ba0cd7 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -79,7 +79,7 @@ All other CLI arguments (and their YAML counterparts) prefixed with `-bloom-comp
## 3.0.0
{{% admonition type="note" %}}
-If you have questions about upgrading to Loki 3.0, please join us on the [community Slack(https://slack.grafana.com/) in the `#loki-3` channel.
+If you have questions about upgrading to Loki 3.0, please join us on the [community Slack](https://slack.grafana.com/) in the `#loki-3` channel.
Or leave a comment on this [Github Issue](https://github.com/grafana/loki/issues/12506).
{{% /admonition %}}
@@ -121,7 +121,7 @@ A flagship feature of Loki 3.0 is native support for the Open Telemetry Protocol
Structured Metadata is enabled by default in Loki 3.0, however, it requires your active schema be using both the `tsdb` index type AND the `v13` storage schema. If you are not using both of these you have two options:
* Upgrade your index version and schema version before updating to 3.0, see [schema config upgrade](https://grafana.com/docs/loki//operations/storage/schema/).
-* Disable Structured Metadata (and therefor OTLP support) and upgrade to 3.0 and perform the schema migration after. This can be done by setting `allow_structured_metadata: false` in the `limits_config` section or set the command line argument `-validation.allow-structured-metadata=false`.
+* Disable Structured Metadata (and therefore OTLP support) and upgrade to 3.0 and perform the schema migration after. This can be done by setting `allow_structured_metadata: false` in the `limits_config` section or set the command line argument `-validation.allow-structured-metadata=false`.
#### `service_name` label
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 132de42b81075..a72b48e8e30ce 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -793,6 +793,16 @@ kafka_config:
# CLI flag: -kafka.write-timeout
[write_timeout: | default = 10s]
+ # The SASL username for authentication to Kafka using the PLAIN mechanism.
+ # Both username and password must be set.
+ # CLI flag: -kafka.sasl-username
+ [sasl_username: | default = ""]
+
+ # The SASL password for authentication to Kafka using the PLAIN mechanism.
+ # Both username and password must be set.
+ # CLI flag: -kafka.sasl-password
+ [sasl_password: | default = ""]
+
# The consumer group used by the consumer to track the last consumed offset.
# The consumer group must be different for each ingester. If the configured
# consumer group contains the '' placeholder, it is replaced with
@@ -3250,7 +3260,8 @@ The `limits_config` block configures global and per-tenant limits in Loki. The v
# CLI flag: -distributor.ingestion-rate-limit-strategy
[ingestion_rate_strategy: | default = "global"]
-# Per-user ingestion rate limit in sample size per second. Units in MB.
+# Per-user ingestion rate limit in sample size per second. Sample size includes
+# size of the logs line and the size of structured metadata labels. Units in MB.
# CLI flag: -distributor.ingestion-rate-limit-mb
[ingestion_rate_mb: | default = 4]
@@ -3754,12 +3765,21 @@ shard_streams:
# CLI flag: -bloom-build.enable
[bloom_creation_enabled: | default = false]
-# Experimental. Number of splits to create for the series keyspace when building
-# blooms. The series keyspace is split into this many parts to parallelize bloom
-# creation.
+# Experimental. Bloom planning strategy to use in bloom creation. Can be one of:
+# 'split_keyspace_by_factor', 'split_by_series_chunks_size'
+# CLI flag: -bloom-build.planning-strategy
+[bloom_planning_strategy: | default = "split_keyspace_by_factor"]
+
+# Experimental. Only if `bloom-build.planning-strategy` is 'split_keyspace_by_factor'.
+# Number of splits to create for the series keyspace when building blooms. The
+# series keyspace is split into this many parts to parallelize bloom creation.
# CLI flag: -bloom-build.split-keyspace-by
[bloom_split_series_keyspace_by: | default = 256]
+# Experimental. Target chunk size in bytes for bloom tasks. Default is 20GB.
+# CLI flag: -bloom-build.split-target-series-chunk-size
+[bloom_task_target_series_chunk_size: | default = 20GB]
+
# Experimental. Compression algorithm for bloom block pages.
# CLI flag: -bloom-build.block-encoding
[bloom_block_encoding: | default = "none"]
@@ -3824,6 +3844,20 @@ otlp_config:
# disables shuffle sharding and tenant is sharded across all partitions.
# CLI flag: -limits.ingestion-partition-tenant-shard-size
[ingestion_partitions_tenant_shard_size: | default = 0]
+
+# S3 server-side encryption type. Required to enable server-side encryption
+# overrides for a specific tenant. If not set, the default S3 client settings
+# are used.
+[s3_sse_type: | default = ""]
+
+# S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not
+# set.
+[s3_sse_kms_key_id: | default = ""]
+
+# S3 server-side encryption KMS encryption context. If unset and the key ID
+# override is set, the encryption context will not be provided to S3. Ignored if
+# the SSE type override is not set.
+[s3_sse_kms_encryption_context: | default = ""]
```
### local_storage_config
@@ -4049,12 +4083,14 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type
Configures additional object stores for a given storage provider.
Supported stores: aws, azure, bos, filesystem, gcs, swift.
Example:
-storage_config:
- named_stores:
- aws:
- store-1:
- endpoint: s3://foo-bucket
- region: us-west1
+```yaml
+ storage_config:
+ named_stores:
+ aws:
+ store-1:
+ endpoint: s3://foo-bucket
+ region: us-west1
+```
Named store from this example can be used by setting object_store to store-1 in period_config.
```yaml
@@ -5540,12 +5576,14 @@ hedging:
# Configures additional object stores for a given storage provider.
# Supported stores: aws, azure, bos, filesystem, gcs, swift.
# Example:
-# storage_config:
-# named_stores:
-# aws:
-# store-1:
-# endpoint: s3://foo-bucket
-# region: us-west1
+# ```yaml
+# storage_config:
+# named_stores:
+# aws:
+# store-1:
+# endpoint: s3://foo-bucket
+# region: us-west1
+# ```
# Named store from this example can be used by setting object_store to store-1
# in period_config.
[named_stores: ]
diff --git a/flake.nix b/flake.nix
index 32ff2f5b25d9f..c9e52eb589af3 100644
--- a/flake.nix
+++ b/flake.nix
@@ -75,12 +75,16 @@
devShell = pkgs.mkShell {
nativeBuildInputs = with pkgs; [
- (import ./packages/chart-releaser.nix {
- inherit (prev) pkgs lib buildGoModule fetchFromGitHub;
+ (pkgs.callPackage ./nix/packages/chart-releaser.nix {
+ inherit pkgs;
+ inherit (pkgs) buildGoModule fetchFromGitHub;
+ })
+
+ (pkgs.callPackage ./nix/packages/faillint.nix {
+ inherit (pkgs) lib buildGoModule fetchFromGitHub;
})
chart-testing
- faillint
gcc
go
golangci-lint
@@ -89,7 +93,6 @@
nettools
nixpkgs-fmt
statix
- systemd
yamllint
];
};
diff --git a/go.mod b/go.mod
index da86bb21b0c18..75ce97ec05761 100644
--- a/go.mod
+++ b/go.mod
@@ -5,9 +5,9 @@ go 1.23
toolchain go1.23.1
require (
- cloud.google.com/go/bigtable v1.29.0
- cloud.google.com/go/pubsub v1.42.0
- cloud.google.com/go/storage v1.43.0
+ cloud.google.com/go/bigtable v1.33.0
+ cloud.google.com/go/pubsub v1.45.0
+ cloud.google.com/go/storage v1.44.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/Azure/go-autorest/autorest/adal v0.9.24
@@ -36,7 +36,7 @@ require (
github.com/fatih/color v1.16.0
github.com/felixge/fgprof v0.9.5
github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c
- github.com/fsouza/fake-gcs-server v1.7.0
+ github.com/fsouza/fake-gcs-server v1.50.2
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.6.0
github.com/go-redis/redis/v8 v8.11.5
@@ -65,11 +65,11 @@ require (
github.com/jmespath/go-jmespath v0.4.0
github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.17.10
+ github.com/klauspost/compress v1.17.11
github.com/klauspost/pgzip v1.2.6
github.com/leodido/go-syslog/v4 v4.1.0
github.com/mattn/go-ieproxy v0.0.12
- github.com/minio/minio-go/v7 v7.0.77
+ github.com/minio/minio-go/v7 v7.0.78
github.com/mitchellh/go-wordwrap v1.0.1
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
@@ -98,13 +98,13 @@ require (
go.etcd.io/bbolt v1.3.11
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.27.0
- golang.org/x/net v0.29.0
+ golang.org/x/crypto v0.28.0
+ golang.org/x/net v0.30.0
golang.org/x/sync v0.8.0
- golang.org/x/sys v0.25.0
- golang.org/x/time v0.6.0
- google.golang.org/api v0.193.0
- google.golang.org/grpc v1.65.0
+ golang.org/x/sys v0.26.0
+ golang.org/x/time v0.7.0
+ google.golang.org/api v0.201.0
+ google.golang.org/grpc v1.67.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -138,11 +138,11 @@ require (
github.com/prometheus/common/sigv4 v0.1.0
github.com/richardartoul/molecule v1.0.0
github.com/schollz/progressbar/v3 v3.14.6
- github.com/shirou/gopsutil/v4 v4.24.8
- github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1
+ github.com/shirou/gopsutil/v4 v4.24.9
+ github.com/thanos-io/objstore v0.0.0-20241015070247-5f04b8b0b52a
github.com/twmb/franz-go v1.17.1
github.com/twmb/franz-go/pkg/kadm v1.13.0
- github.com/twmb/franz-go/pkg/kfake v0.0.0-20240821035758-b77dd13e2bfa
+ github.com/twmb/franz-go/pkg/kfake v0.0.0-20241015013301-cea7aa5d8037
github.com/twmb/franz-go/pkg/kmsg v1.8.0
github.com/twmb/franz-go/plugin/kotel v1.5.0
github.com/twmb/franz-go/plugin/kprom v1.1.0
@@ -150,50 +150,57 @@ require (
go.opentelemetry.io/collector/pdata v1.12.0
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
- golang.org/x/oauth2 v0.22.0
- golang.org/x/text v0.18.0
- google.golang.org/protobuf v1.34.2
+ golang.org/x/oauth2 v0.23.0
+ golang.org/x/text v0.19.0
+ google.golang.org/protobuf v1.35.1
gotest.tools v2.2.0+incompatible
k8s.io/apimachinery v0.29.3
k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3
)
require (
- cel.dev/expr v0.16.0 // indirect
- cloud.google.com/go/auth v0.9.0 // indirect
+ cel.dev/expr v0.16.1 // indirect
+ cloud.google.com/go/auth v0.9.8 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
- cloud.google.com/go/monitoring v1.21.0 // indirect
+ cloud.google.com/go/monitoring v1.21.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/benbjohnson/immutable v0.4.0 // indirect
github.com/coreos/etcd v3.3.27+incompatible // indirect
github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect
github.com/dlclark/regexp2 v1.4.0 // indirect
+ github.com/ebitengine/purego v0.8.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/goccy/go-json v0.10.3 // indirect
+ github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/ncw/swift v1.0.53 // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect
+ github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/rivo/uniseg v0.4.7 // indirect
- github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/otel/sdk v1.28.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
+ google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
)
require (
- cloud.google.com/go v0.115.1 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
- cloud.google.com/go/iam v1.2.0 // indirect
- cloud.google.com/go/longrunning v0.6.0 // indirect
+ cloud.google.com/go v0.116.0 // indirect
+ cloud.google.com/go/compute/metadata v0.5.2 // indirect
+ cloud.google.com/go/iam v1.2.1 // indirect
+ cloud.google.com/go/longrunning v0.6.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
@@ -231,7 +238,7 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
- github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
+ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -273,13 +280,13 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/btree v1.1.2 // indirect
+ github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect
github.com/google/s2a-go v0.1.8 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gophercloud/gophercloud v1.13.0 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect
@@ -350,19 +357,19 @@ require (
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/semconv v0.105.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
- go.opentelemetry.io/otel v1.28.0
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
- go.opentelemetry.io/otel/trace v1.28.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+ go.opentelemetry.io/otel v1.29.0
+ go.opentelemetry.io/otel/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/trace v1.29.0
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/mod v0.19.0 // indirect
- golang.org/x/term v0.24.0 // indirect
+ golang.org/x/term v0.25.0 // indirect
golang.org/x/tools v0.23.0 // indirect
- google.golang.org/genproto v0.0.0-20240820151423-278611b39280 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 // indirect
+ google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index 62b957a7ce01c..c47d356cc10d3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y=
-cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g=
+cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
@@ -34,15 +34,15 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
-cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
-cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
+cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
+cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
-cloud.google.com/go/auth v0.9.0 h1:cYhKl1JUhynmxjXfrk4qdPc6Amw7i+GC9VLflgT0p5M=
-cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM=
+cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8=
+cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
@@ -53,8 +53,8 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
-cloud.google.com/go/bigtable v1.29.0 h1:2CnFjKPwjpZMZdTi2RpppvxzD80zKzDYrLYEQw/NnAs=
-cloud.google.com/go/bigtable v1.29.0/go.mod h1:5p909nNdWaNUcWs6KGZO8mI5HUovstlmrIi7+eA5PTQ=
+cloud.google.com/go/bigtable v1.33.0 h1:2BDaWLRAwXO14DJL/u8crbV2oUbMZkIa2eGq8Yao1bk=
+cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0=
cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
@@ -64,8 +64,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
@@ -84,19 +84,21 @@ cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFP
cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
-cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8=
-cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
-cloud.google.com/go/kms v1.18.5 h1:75LSlVs60hyHK3ubs2OHd4sE63OAMcM2BdSJc2bkuM4=
-cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY=
+cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU=
+cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g=
+cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY=
+cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
-cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI=
-cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts=
+cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs=
+cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A=
+cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc=
+cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
-cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro=
-cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4=
+cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc=
+cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c=
cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
@@ -108,8 +110,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs=
-cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y=
+cloud.google.com/go/pubsub v1.45.0 h1:AjZYygbgofz+T6D6Ln+v95NmQZ25diHWhUJG44btPpc=
+cloud.google.com/go/pubsub v1.45.0/go.mod h1:BD4a/kmE8OePyHoa1qAHEw1rMzXX+Pc8Se54T/8mc3I=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
@@ -130,9 +132,11 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
-cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
-cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI=
+cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew=
+cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
@@ -248,6 +252,14 @@ github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
@@ -459,8 +471,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
-github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
@@ -578,6 +590,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE=
+github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
@@ -642,8 +656,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8=
-github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
+github.com/fsouza/fake-gcs-server v1.50.2 h1:ulrS1pavCOCbMZfN5ZPgBRMFWclON9xDsuLBniXtQoE=
+github.com/fsouza/fake-gcs-server v1.50.2/go.mod h1:VU6Zgei4647KuT4XER8WHv5Hcj2NIySndyG8gfvwckA=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
@@ -934,8 +948,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -1005,8 +1019,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -1029,8 +1043,9 @@ github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdy
github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
@@ -1304,8 +1319,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
@@ -1414,8 +1429,8 @@ github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eyk
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
-github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/minio/minio-go/v7 v7.0.78 h1:LqW2zy52fxnI4gg8C2oZviTaKHcBV36scS+RzJnxUFs=
+github.com/minio/minio-go/v7 v7.0.78/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
@@ -1589,6 +1604,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
+github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -1709,12 +1726,8 @@ github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYM
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
-github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI=
-github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg=
-github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
-github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
-github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
-github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI=
+github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
@@ -1795,8 +1808,8 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955u
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
-github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 h1:z0v9BB/p7s4J6R//+0a5M3wCld8KzNjrGRLIwXfrAZk=
-github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4=
+github.com/thanos-io/objstore v0.0.0-20241015070247-5f04b8b0b52a h1:0etzAoXPjVVUnscliA+xy8vWdE88jbvhcVMr1rVHc60=
+github.com/thanos-io/objstore v0.0.0-20241015070247-5f04b8b0b52a/go.mod h1:/ZMUxFcp/nT6oYV5WslH9k07NU/+86+aibgZRmMMr/4=
github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1818,8 +1831,8 @@ github.com/twmb/franz-go v1.17.1 h1:0LwPsbbJeJ9R91DPUHSEd4su82WJWcTY1Zzbgbg4CeQ=
github.com/twmb/franz-go v1.17.1/go.mod h1:NreRdJ2F7dziDY/m6VyspWd6sNxHKXdMZI42UfQ3GXM=
github.com/twmb/franz-go/pkg/kadm v1.13.0 h1:bJq4C2ZikUE2jh/wl9MtMTQ/kpmnBgVFh8XMQBEC+60=
github.com/twmb/franz-go/pkg/kadm v1.13.0/go.mod h1:VMvpfjz/szpH9WB+vGM+rteTzVv0djyHFimci9qm2C0=
-github.com/twmb/franz-go/pkg/kfake v0.0.0-20240821035758-b77dd13e2bfa h1:OmQ4DJhqeOPdIH60Psut1vYU8A6LGyxJbF09w5RAa2w=
-github.com/twmb/franz-go/pkg/kfake v0.0.0-20240821035758-b77dd13e2bfa/go.mod h1:nkBI/wGFp7t1NJnnCeJdS4sX5atPAqwCPpDXKuI7SC8=
+github.com/twmb/franz-go/pkg/kfake v0.0.0-20241015013301-cea7aa5d8037 h1:M4Zj79q1OdZusy/Q8TOTttvx/oHkDVY7sc0xDyRnwWs=
+github.com/twmb/franz-go/pkg/kfake v0.0.0-20241015013301-cea7aa5d8037/go.mod h1:nkBI/wGFp7t1NJnnCeJdS4sX5atPAqwCPpDXKuI7SC8=
github.com/twmb/franz-go/pkg/kmsg v1.8.0 h1:lAQB9Z3aMrIP9qF9288XcFf/ccaSxEitNA1CDTEIeTA=
github.com/twmb/franz-go/pkg/kmsg v1.8.0/go.mod h1:HzYEb8G3uu5XevZbtU0dVbkphaKTHk0X68N5ka4q6mU=
github.com/twmb/franz-go/plugin/kotel v1.5.0 h1:TiPfGUbQK384OO7ZYGdo7JuPCbJn+/8njQ/D9Je9CDE=
@@ -1886,8 +1899,8 @@ github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI=
-go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI=
+go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw=
+go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -1929,24 +1942,26 @@ go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimK
go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ=
+go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
-go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
+go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
+go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
+go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
+go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
@@ -2012,8 +2027,8 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2141,8 +2156,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2167,8 +2182,8 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2291,6 +2306,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2312,8 +2328,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2323,8 +2339,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
-golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2342,8 +2358,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2352,8 +2368,8 @@ golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -2453,7 +2469,6 @@ gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6d
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2499,8 +2514,8 @@ google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOI
google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.193.0 h1:eOGDoJFsLU+HpCBaDJex2fWiYujAw9KbXgpOAMePoUs=
-google.golang.org/api v0.193.0/go.mod h1:Po3YMV1XZx+mTku3cfJrlIYR03wiGrCOsdpC67hjZvw=
+google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0=
+google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2611,12 +2626,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S
google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ=
-google.golang.org/genproto v0.0.0-20240820151423-278611b39280 h1:oKt8r1ZvaPqBe3oeGTdyx1iNjuBS+VJcc9QdU1CD3d8=
-google.golang.org/genproto v0.0.0-20240820151423-278611b39280/go.mod h1:wxEc5TmU9JSLs1rSqG4z1YzeSNigp/9yIojIPuZVvKQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 h1:YDFM9oOjiFhaMAVgbDxfxW+66nRrsvzQzJ51wp3OxC0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 h1:XQMA2e105XNlEZ8NRF0HqnUOZzP14sUSsgL09kpdNnU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk=
+google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -2659,9 +2674,11 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -2677,8 +2694,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
diff --git a/integration/bloom_building_test.go b/integration/bloom_building_test.go
index 2c4662eef4cb3..9e7727b674f1e 100644
--- a/integration/bloom_building_test.go
+++ b/integration/bloom_building_test.go
@@ -61,15 +61,7 @@ func TestBloomBuilding(t *testing.T) {
cliIngester.Now = now
// We now ingest some logs across many series.
- series := make([]labels.Labels, 0, nSeries)
- for i := 0; i < nSeries; i++ {
- lbs := labels.FromStrings("job", fmt.Sprintf("job-%d", i))
- series = append(series, lbs)
-
- for j := 0; j < nLogsPerSeries; j++ {
- require.NoError(t, cliDistributor.PushLogLine(fmt.Sprintf("log line %d", j), now, nil, lbs.Map()))
- }
- }
+ series := writeSeries(t, nSeries, nLogsPerSeries, cliDistributor, now, "job")
// restart ingester which should flush the chunks and index
require.NoError(t, tIngester.Restart())
@@ -96,6 +88,8 @@ func TestBloomBuilding(t *testing.T) {
"-bloom-build.planner.interval=15s",
"-bloom-build.planner.min-table-offset=0", // Disable table offset so we process today's data.
"-bloom.cache-list-ops=0", // Disable cache list operations to avoid caching issues.
+ "-bloom-build.planning-strategy=split_by_series_chunks_size",
+ "-bloom-build.split-target-series-chunk-size=1KB",
)
require.NoError(t, clu.Run())
@@ -122,14 +116,8 @@ func TestBloomBuilding(t *testing.T) {
checkSeriesInBlooms(t, now, tenantID, bloomStore, series)
// Push some more logs so TSDBs need to be updated.
- for i := 0; i < nSeries; i++ {
- lbs := labels.FromStrings("job", fmt.Sprintf("job-new-%d", i))
- series = append(series, lbs)
-
- for j := 0; j < nLogsPerSeries; j++ {
- require.NoError(t, cliDistributor.PushLogLine(fmt.Sprintf("log line %d", j), now, nil, lbs.Map()))
- }
- }
+ newSeries := writeSeries(t, nSeries, nLogsPerSeries, cliDistributor, now, "job-new")
+ series = append(series, newSeries...)
// restart ingester which should flush the chunks and index
require.NoError(t, tIngester.Restart())
@@ -145,6 +133,33 @@ func TestBloomBuilding(t *testing.T) {
checkSeriesInBlooms(t, now, tenantID, bloomStore, series)
}
+func writeSeries(t *testing.T, nSeries int, nLogsPerSeries int, cliDistributor *client.Client, now time.Time, seriesPrefix string) []labels.Labels {
+ series := make([]labels.Labels, 0, nSeries)
+ for i := 0; i < nSeries; i++ {
+ lbs := labels.FromStrings("job", fmt.Sprintf("%s-%d", seriesPrefix, i))
+ series = append(series, lbs)
+
+ for j := 0; j < nLogsPerSeries; j++ {
+ // Only write wtructured metadata for half of the series
+ var metadata map[string]string
+ if i%2 == 0 {
+ metadata = map[string]string{
+ "traceID": fmt.Sprintf("%d%d", i, j),
+ "user": fmt.Sprintf("%d%d", i, j%10),
+ }
+ }
+
+ require.NoError(t, cliDistributor.PushLogLine(
+ fmt.Sprintf("log line %d", j),
+ now,
+ metadata,
+ lbs.Map(),
+ ))
+ }
+ }
+ return series
+}
+
func checkCompactionFinished(t *testing.T, cliCompactor *client.Client) {
checkForTimestampMetric(t, cliCompactor, "loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds")
}
diff --git a/operator/.bingo/Variables.mk b/operator/.bingo/Variables.mk
index 348997ab41c11..910d148a265a6 100644
--- a/operator/.bingo/Variables.mk
+++ b/operator/.bingo/Variables.mk
@@ -77,11 +77,11 @@ $(KIND): $(BINGO_DIR)/kind.mod
@echo "(re)installing $(GOBIN)/kind-v0.23.0"
@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kind.mod -o=$(GOBIN)/kind-v0.23.0 "sigs.k8s.io/kind"
-KUSTOMIZE := $(GOBIN)/kustomize-v4.5.7
+KUSTOMIZE := $(GOBIN)/kustomize-v5.4.3
$(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/kustomize-v4.5.7"
- @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v4.5.7 "sigs.k8s.io/kustomize/kustomize/v4"
+ @echo "(re)installing $(GOBIN)/kustomize-v5.4.3"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v5.4.3 "sigs.k8s.io/kustomize/kustomize/v5"
OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.37.0
$(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod
diff --git a/operator/.bingo/kustomize.mod b/operator/.bingo/kustomize.mod
index f2af7638a1d52..89cbd27701f15 100644
--- a/operator/.bingo/kustomize.mod
+++ b/operator/.bingo/kustomize.mod
@@ -1,9 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
-go 1.17
+go 1.22.5
-exclude sigs.k8s.io/kustomize/api v0.2.0
-
-exclude sigs.k8s.io/kustomize/cmd/config v0.2.0
-
-require sigs.k8s.io/kustomize/kustomize/v4 v4.5.7
+require sigs.k8s.io/kustomize/kustomize/v5 v5.4.3
diff --git a/operator/.bingo/kustomize.sum b/operator/.bingo/kustomize.sum
index 247834f4b7f42..8f7eee9e3a76a 100644
--- a/operator/.bingo/kustomize.sum
+++ b/operator/.bingo/kustomize.sum
@@ -1,453 +1,98 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
-github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
-github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
-github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
-github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
-github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
-github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=
-github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
-github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
-github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM=
-github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys=
-github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
-github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
-github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
-go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
+github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661 h1:nqYOUleKLC/0P1zbU29F5q6aoezM6MOAVz+iyfQbZ5M=
-k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ=
-sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY=
-sigs.k8s.io/kustomize/api v0.8.11 h1:LzQzlq6Z023b+mBtc6v72N2mSHYmN8x7ssgbf/hv0H8=
-sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
-sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
-sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s=
-sigs.k8s.io/kustomize/cmd/config v0.9.7 h1:xxvL/np/zYHVuCH1tNFehlyEtSW5oXjoI6ycejiyOwQ=
-sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0=
-sigs.k8s.io/kustomize/cmd/config v0.9.13 h1:lqOf0QcFhNvgZkgrPINNRs7TxEO7IGVtLMyUJId3oRE=
-sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs=
-sigs.k8s.io/kustomize/cmd/config v0.10.9 h1:LV8AUwZPuvqhGfia50uNwsPwNg1xOy9koEf5hyBnYs4=
-sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ=
-sigs.k8s.io/kustomize/kustomize/v4 v4.0.5 h1:0xQWp03aKWilF6UJrupcA2rCoCn3jejkJ+m/CCI/Fis=
-sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk=
-sigs.k8s.io/kustomize/kustomize/v4 v4.2.0 h1:RKgbyHgzuHQZ35sBDzWcbnR3HBlJSYdSN0H+sx3tUkk=
-sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go=
-sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 h1:cDW6AVMl6t/SLuQaezMET8hgnadZGIAr8tUrxFVOrpg=
-sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q=
-sigs.k8s.io/kustomize/kyaml v0.10.15 h1:dSLgG78KyaxN4HylPXdK+7zB3k7sW6q3IcCmcfKA+aI=
-sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
-sigs.k8s.io/kustomize/kyaml v0.11.0 h1:9KhiCPKaVyuPcgOLJXkvytOvjMJLoxpjodiycb4gHsA=
-sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM=
-sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk=
-sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU=
+sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc=
+sigs.k8s.io/kustomize/cmd/config v0.14.2 h1:YOCu0BnVPm2Iq6PR4fJgO6+rivg5LbR3+o/4ZUeXAvM=
+sigs.k8s.io/kustomize/cmd/config v0.14.2/go.mod h1:w30rR4oCUm5wEi0tSuBCmuBMS9Z/Cq6oDdfg8fL/qls=
+sigs.k8s.io/kustomize/kustomize/v5 v5.4.3 h1:SJMDq/0HYNTFPgmlBuxsGzdBB6furxhwHKHEfzBbvSw=
+sigs.k8s.io/kustomize/kustomize/v5 v5.4.3/go.mod h1:vhSLBp3H7wx0oeh/hE7CMQVNft42plZxUqxhvVZxb8Q=
+sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0=
+sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/operator/.bingo/operator-sdk.mod b/operator/.bingo/operator-sdk.mod
index 120bd4ebf28b9..9eff57c0f1cf3 100644
--- a/operator/.bingo/operator-sdk.mod
+++ b/operator/.bingo/operator-sdk.mod
@@ -1,5 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
-go 1.22.8
+go 1.22.5
require github.com/operator-framework/operator-sdk v1.37.0 // cmd/operator-sdk
diff --git a/operator/.bingo/variables.env b/operator/.bingo/variables.env
index 63c377848036b..193b281afdea3 100644
--- a/operator/.bingo/variables.env
+++ b/operator/.bingo/variables.env
@@ -28,7 +28,7 @@ JSONNETFMT="${GOBIN}/jsonnetfmt-v0.20.0"
KIND="${GOBIN}/kind-v0.23.0"
-KUSTOMIZE="${GOBIN}/kustomize-v4.5.7"
+KUSTOMIZE="${GOBIN}/kustomize-v5.4.3"
OPERATOR_SDK="${GOBIN}/operator-sdk-v1.37.0"
diff --git a/operator/Dockerfile b/operator/Dockerfile
index 1f8a6a3ca0483..a29ad9692c027 100644
--- a/operator/Dockerfile
+++ b/operator/Dockerfile
@@ -3,7 +3,7 @@ FROM golang:1.22.8 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
-COPY apis/ apis/
+COPY api/ api/
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
@@ -11,12 +11,11 @@ COPY go.sum go.sum
RUN go mod download
# Copy the go source
-COPY main.go main.go
-COPY controllers/ controllers/
+COPY cmd/loki-operator/main.go cmd/loki-operator/main.go
COPY internal/ internal/
# Build
-RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -mod=readonly -a -o manager main.go
+RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -mod=readonly -a -o manager cmd/loki-operator/main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
diff --git a/operator/Dockerfile.cross b/operator/Dockerfile.cross
index 548159b5d521b..03b7b2e81ec5b 100644
--- a/operator/Dockerfile.cross
+++ b/operator/Dockerfile.cross
@@ -8,7 +8,7 @@ FROM --platform=linux/amd64 $BUILD_IMAGE as builder
COPY --from=goenv /goarch /goarm /
WORKDIR /workspace
# Copy the Go Modules manifests
-COPY apis/ apis/
+COPY api/ api/
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
@@ -16,12 +16,11 @@ COPY go.sum go.sum
RUN go mod download
# Copy the go source
-COPY main.go main.go
-COPY controllers/ controllers/
+COPY cmd/loki-operator/main.go cmd/loki-operator/main.go
COPY internal/ internal/
# Build
-RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on GOARCH=$(cat /goarch) GOARM=$(cat /goarm) go build -a -o manager main.go
+RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on GOARCH=$(cat /goarch) GOARM=$(cat /goarm) go build -a -o manager cmd/loki-operator/main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
diff --git a/operator/Makefile b/operator/Makefile
index 8dce06bbce797..d9031ba588c3d 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -126,10 +126,10 @@ deps: go.mod go.sum
go mod verify
.PHONY: deps-api
-deps-api: apis/loki/go.mod apis/loki/go.sum
- @cd ./apis/loki/ && go mod tidy
- @cd ./apis/loki/ && go mod download
- @cd ./apis/loki/ && go mod verify
+deps-api: api/loki/go.mod api/loki/go.sum
+ @cd ./api/loki/ && go mod tidy
+ @cd ./api/loki/ && go mod download
+ @cd ./api/loki/ && go mod verify
.PHONY: cli
cli: deps bin/loki-broker ## Build loki-broker CLI binary
@@ -138,11 +138,11 @@ bin/loki-broker: $(GO_FILES) | generate
.PHONY: manager
manager: deps generate ## Build manager binary
- go build -o bin/manager main.go
+ go build -o bin/manager ./cmd/loki-operator/main.go
.PHONY: size-calculator
size-calculator: deps generate ## Build size-calculator binary
- go build -o bin/size-calculator main.go
+ go build -o bin/size-calculator ./cmd/size-calculator/main.go
.PHONY: go-generate
go-generate: ## Run go generate
@@ -163,7 +163,7 @@ test: $(GO_FILES)
.PHONY: test-unit-api
test-unit-api: $(GO_FILES)
- @cd ./apis/loki/ && go test ./... -coverprofile cover.out
+ @cd ./api/loki/ && go test ./... -coverprofile cover.out
.PHONY: test-unit-prometheus
test-unit-prometheus: $(PROMTOOL) ## Run prometheus unit tests
@@ -237,7 +237,7 @@ quickstart-cleanup: $(KIND) ## Cleanup for quickstart set up
.PHONY: run
run: generate manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
- go run ./main.go
+ go run ./cmd/loki-operator/main.go
.PHONY: install
install: manifests $(KUSTOMIZE) ## Install CRDs into a cluster
@@ -312,17 +312,17 @@ oci-push-calculator: ## Push the calculator image
$(OCI_RUNTIME) push $(CALCULATOR_IMG)
##@ Website
-TYPES_TARGET := $(shell find apis/loki -type f -iname "*_types.go")
+TYPES_TARGET := $(shell find api/loki -type f -iname "*_types.go")
docs/operator/api.md: $(TYPES_TARGET) $(GEN_CRD_API_REFERENCE_DOCS)
- $(GEN_CRD_API_REFERENCE_DOCS) -api-dir "github.com/grafana/loki/operator/apis/loki/" -config "$(PWD)/config/docs/config.json" -template-dir "$(PWD)/config/docs/templates" -out-file "$(PWD)/$@"
+ $(GEN_CRD_API_REFERENCE_DOCS) -api-dir "github.com/grafana/loki/operator/api/loki/" -config "$(PWD)/config/docs/config.json" -template-dir "$(PWD)/config/docs/templates" -out-file "$(PWD)/$@"
sed -i 's/+docs:/ docs:/' $@
sed -i 's/+parent:/ parent:/' $@
sed -i 's/##/\n##/' $@
sed -i 's/+newline/\n/' $@
-FEATURE_GATES_TARGET := $(shell find apis/config -type f -iname "*_types.go")
+FEATURE_GATES_TARGET := $(shell find api/config -type f -iname "*_types.go")
docs/operator/feature-gates.md: $(FEATURE_GATES_TARGET) $(GEN_CRD_API_REFERENCE_DOCS)
- $(GEN_CRD_API_REFERENCE_DOCS) -api-dir "github.com/grafana/loki/operator/apis/config/v1/" -config "$(PWD)/config/docs/config.json" -template-dir "$(PWD)/config/docs/templates" -out-file "$(PWD)/$@"
+ $(GEN_CRD_API_REFERENCE_DOCS) -api-dir "github.com/grafana/loki/operator/api/config/v1/" -config "$(PWD)/config/docs/config.json" -template-dir "$(PWD)/config/docs/templates" -out-file "$(PWD)/$@"
sed -i 's/title: "API"/title: "Feature Gates"/' $@
sed -i 's/+docs:/ docs:/' $@
sed -i 's/+parent:/ parent:/' $@
diff --git a/operator/PROJECT b/operator/PROJECT
index 2f46cff1f974a..cef254c461476 100644
--- a/operator/PROJECT
+++ b/operator/PROJECT
@@ -1,7 +1,7 @@
componentConfig: true
domain: grafana.com
layout:
-- go.kubebuilder.io/v3
+- go.kubebuilder.io/v4
multigroup: true
plugins:
manifests.sdk.operatorframework.io/v2: {}
@@ -16,7 +16,7 @@ resources:
domain: grafana.com
group: loki
kind: LokiStack
- path: github.com/grafana/loki/operator/apis/loki/v1beta1
+ path: github.com/grafana/loki/operator/api/loki/v1beta1
version: v1beta1
- api:
crdVersion: v1
@@ -25,7 +25,7 @@ resources:
domain: grafana.com
group: loki
kind: LokiStack
- path: github.com/grafana/loki/operator/apis/loki/v1
+ path: github.com/grafana/loki/operator/api/loki/v1
version: v1
webhooks:
conversion: true
@@ -38,7 +38,7 @@ resources:
domain: grafana.com
group: loki
kind: AlertingRule
- path: github.com/grafana/loki/operator/apis/loki/v1
+ path: github.com/grafana/loki/operator/api/loki/v1
version: v1
webhooks:
conversion: true
@@ -51,7 +51,7 @@ resources:
domain: grafana.com
group: loki
kind: AlertingRule
- path: github.com/grafana/loki/operator/apis/loki/v1beta1
+ path: github.com/grafana/loki/operator/api/loki/v1beta1
version: v1beta1
webhooks:
validation: true
@@ -63,7 +63,7 @@ resources:
domain: grafana.com
group: loki
kind: RecordingRule
- path: github.com/grafana/loki/operator/apis/loki/v1
+ path: github.com/grafana/loki/operator/api/loki/v1
version: v1
webhooks:
validation: true
@@ -76,7 +76,7 @@ resources:
domain: grafana.com
group: loki
kind: RecordingRule
- path: github.com/grafana/loki/operator/apis/loki/v1beta1
+ path: github.com/grafana/loki/operator/api/loki/v1beta1
version: v1beta1
webhooks:
validation: true
@@ -88,7 +88,7 @@ resources:
domain: grafana.com
group: loki
kind: RulerConfig
- path: github.com/grafana/loki/operator/apis/loki/v1
+ path: github.com/grafana/loki/operator/api/loki/v1
version: v1
- api:
crdVersion: v1
@@ -97,6 +97,6 @@ resources:
domain: grafana.com
group: loki
kind: RulerConfig
- path: github.com/grafana/loki/operator/apis/loki/v1beta1
+ path: github.com/grafana/loki/operator/api/loki/v1beta1
version: v1beta1
version: "3"
diff --git a/operator/apis/config/v1/doc.go b/operator/api/config/v1/doc.go
similarity index 100%
rename from operator/apis/config/v1/doc.go
rename to operator/api/config/v1/doc.go
diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/api/config/v1/projectconfig_types.go
similarity index 100%
rename from operator/apis/config/v1/projectconfig_types.go
rename to operator/api/config/v1/projectconfig_types.go
diff --git a/operator/apis/config/v1/zz_generated.deepcopy.go b/operator/api/config/v1/zz_generated.deepcopy.go
similarity index 100%
rename from operator/apis/config/v1/zz_generated.deepcopy.go
rename to operator/api/config/v1/zz_generated.deepcopy.go
diff --git a/operator/apis/loki/go.mod b/operator/api/loki/go.mod
similarity index 95%
rename from operator/apis/loki/go.mod
rename to operator/api/loki/go.mod
index 24d692d874e34..1fbcffb6bfe3b 100644
--- a/operator/apis/loki/go.mod
+++ b/operator/api/loki/go.mod
@@ -1,4 +1,4 @@
-module github.com/grafana/loki/operator/apis/loki
+module github.com/grafana/loki/operator/api/loki
go 1.19
diff --git a/operator/apis/loki/go.sum b/operator/api/loki/go.sum
similarity index 100%
rename from operator/apis/loki/go.sum
rename to operator/api/loki/go.sum
diff --git a/operator/apis/loki/register.go b/operator/api/loki/register.go
similarity index 100%
rename from operator/apis/loki/register.go
rename to operator/api/loki/register.go
diff --git a/operator/apis/loki/v1/alertingrule_types.go b/operator/api/loki/v1/alertingrule_types.go
similarity index 100%
rename from operator/apis/loki/v1/alertingrule_types.go
rename to operator/api/loki/v1/alertingrule_types.go
diff --git a/operator/apis/loki/v1/doc.go b/operator/api/loki/v1/doc.go
similarity index 100%
rename from operator/apis/loki/v1/doc.go
rename to operator/api/loki/v1/doc.go
diff --git a/operator/apis/loki/v1/groupversion_info.go b/operator/api/loki/v1/groupversion_info.go
similarity index 100%
rename from operator/apis/loki/v1/groupversion_info.go
rename to operator/api/loki/v1/groupversion_info.go
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/api/loki/v1/lokistack_types.go
similarity index 91%
rename from operator/apis/loki/v1/lokistack_types.go
rename to operator/api/loki/v1/lokistack_types.go
index e0c55ec5d569f..84c097131057f 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/api/loki/v1/lokistack_types.go
@@ -27,7 +27,7 @@ const (
// LokiStackSizeType declares the type for loki cluster scale outs.
//
-// +kubebuilder:validation:Enum="1x.demo";"1x.extra-small";"1x.small";"1x.medium"
+// +kubebuilder:validation:Enum="1x.demo";"1x.pico";"1x.extra-small";"1x.small";"1x.medium"
type LokiStackSizeType string
const (
@@ -39,10 +39,18 @@ const (
// DO NOT USE THIS IN PRODUCTION!
SizeOneXDemo LokiStackSizeType = "1x.demo"
+ // SizeOneXPico defines the size of a single Loki deployment
+ // with extra small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **without** the
+ // requirement for single replication factor and auto-compaction.
+ //
+ // FIXME: Add clear description of ingestion/query performance expectations.
+ SizeOneXPico LokiStackSizeType = "1x.pico"
+
// SizeOneXExtraSmall defines the size of a single Loki deployment
- // with extra small resources/limits requirements and without HA support.
- // This size is ultimately dedicated for development and demo purposes.
- // DO NOT USE THIS IN PRODUCTION!
+ // with extra small resources/limits requirements and HA support for all
+ // Loki components. This size is dedicated for setup **without** the
+ // requirement for single replication factor and auto-compaction.
//
// FIXME: Add clear description of ingestion/query performance expectations.
SizeOneXExtraSmall LokiStackSizeType = "1x.extra-small"
@@ -289,6 +297,34 @@ type OpenshiftTenantSpec struct {
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Admin Groups"
AdminGroups []string `json:"adminGroups"`
+
+ // OTLP contains settings for ingesting data using OTLP in the OpenShift tenancy mode.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="OpenTelemetry Protocol"
+ OTLP *OpenshiftOTLPConfig `json:"otlp,omitempty"`
+}
+
+// OpenshiftOTLPConfig defines configuration specific to users using OTLP together with an OpenShift tenancy mode.
+type OpenshiftOTLPConfig struct {
+ // DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ // metadata.
+ //
+ // Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ // meta information to not be available as stream labels or structured metadata, potentially making queries more
+ // expensive and less performant.
+ //
+ // Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ // added to the configuration, even if this field is set to true.
+ //
+ // This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ // usecase.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Disable recommended OTLP attributes"
+ DisableRecommendedAttributes bool `json:"disableRecommendedAttributes,omitempty"`
}
// LokiComponentSpec defines the requirements to configure scheduling
@@ -791,138 +827,70 @@ type IngestionLimitSpec struct {
PerStreamRateLimitBurst int32 `json:"perStreamRateLimitBurst,omitempty"`
}
-// OTLPAttributeAction defines the action to executed when indexing
-// OTLP resource attributes. Resource attributes can be either added
-// to the index, the chunk structured metadata or entirely dropped.
-type OTLPAttributeAction string
-
-const (
- // OTLPAttributeActionIndexLabel stores a resource attribute as a label, which is part of the index identifying streams.
- OTLPAttributeActionIndexLabel OTLPAttributeAction = "indexLabel"
- // OTLPAttributeActionStructuredMetadata stores an attribute as structured metadata with each log entry.
- OTLPAttributeActionStructuredMetadata OTLPAttributeAction = "structuredMetadata"
- // OTLPAttributeActionDrop removes the matching attributes from the log entry.
- OTLPAttributeActionDrop OTLPAttributeAction = "drop"
-)
-
-// OTLPAttributesSpec contains the configuration for a set of attributes
-// to store them as index labels or structured metadata or drop them altogether.
-type OTLPAttributesSpec struct {
- // Action defines the indexing action for the selected attributes. They
- // can be either added to structured metadata or drop altogether.
- //
- // +required
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Enum=structured_metadata;drop
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Action"
- Action OTLPAttributeAction `json:"action"`
-
- // Attributes allows choosing the attributes by listing their names.
+// OTLPSpec defines which resource, scope and log attributes should be used as stream labels or
+// stored as structured metadata.
+type OTLPSpec struct {
+ // StreamLabels configures which resource attributes are converted to Loki stream labels.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attribute Names"
- Attributes []string `json:"attributes,omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Stream Labels"
+ StreamLabels *OTLPStreamLabelSpec `json:"streamLabels,omitempty"`
- // Regex allows choosing the attributes by matching a regular expression.
+ // StructuredMetadata configures which attributes are saved in structured metadata.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Regular Expression"
- Regex string `json:"regex,omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Structured Metadata"
+ StructuredMetadata *OTLPMetadataSpec `json:"structuredMetadata,omitempty"`
}
-// OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
-// to store them as index labels or structured metadata or drop them altogether.
-type OTLPResourceAttributesConfigSpec struct {
- // Action defines the indexing action for the selected resoure attributes. They
- // can be either indexed as labels, added to structured metadata or drop altogether.
- //
- // +required
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Enum=index_label;structured_metadata;drop
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Action"
- Action OTLPAttributeAction `json:"action"`
-
- // Attributes is the list of attributes to configure indexing or drop them
- // altogether.
+type OTLPStreamLabelSpec struct {
+ // ResourceAttributes lists the names of the resource attributes that should be converted into Loki stream labels.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attribute Names"
- Attributes []string `json:"attributes,omitempty"`
-
- // Regex allows choosing the attributes by matching a regular expression.
- //
- // +optional
- // +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Regular Expression"
- Regex string `json:"regex,omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resource Attributes"
+ ResourceAttributes []OTLPAttributeReference `json:"resourceAttributes,omitempty"`
}
-// OTLPResourceAttributesSpec contains the configuration for resource attributes
-// to store them as index labels or structured metadata or drop them altogether.
-type OTLPResourceAttributesSpec struct {
- // IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- // indexed as labels.
- //
- // If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
+type OTLPMetadataSpec struct {
+ // ResourceAttributes lists the names of resource attributes that should be included in structured metadata.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch",displayName="Ignore Global Defaults"
- IgnoreDefaults bool `json:"ignoreDefaults,omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resource Attributes"
+ ResourceAttributes []OTLPAttributeReference `json:"resourceAttributes,omitempty"`
- // Attributes contains the configuration for resource attributes
- // to store them as index labels or structured metadata or drop them altogether.
+ // ScopeAttributes lists the names of scope attributes that should be included in structured metadata.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Attributes"
- Attributes []OTLPResourceAttributesConfigSpec `json:"attributes,omitempty"`
-}
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Scope Attributes"
+ ScopeAttributes []OTLPAttributeReference `json:"scopeAttributes,omitempty"`
-// GlobalOTLPSpec defines which resource, scope and log attributes to
-// be stored as index or structured metadata or drop altogether for all
-// tenants.
-type GlobalOTLPSpec struct {
- // IndexedResourceAttributes contains the global configuration for resource attributes
- // to store them as index labels.
+ // LogAttributes lists the names of log attributes that should be included in structured metadata.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Indexed Resource Attributes"
- IndexedResourceAttributes []string `json:"indexedResourceAttributes,omitempty"`
-
- OTLPSpec `json:",omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Log Attributes"
+ LogAttributes []OTLPAttributeReference `json:"logAttributes,omitempty"`
}
-// OTLPSpec defines which resource, scope and log attributes to
-// be stored as index or structured metadata or drop altogether
-type OTLPSpec struct {
- // ResourceAttributes contains the configuration for resource attributes
- // to store them as index labels or structured metadata or drop them altogether.
- //
- // +optional
- // +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resource Attributes"
- ResourceAttributes *OTLPResourceAttributesSpec `json:"resourceAttributes,omitempty"`
-
- // ScopeAttributes contains the configuration for scope attributes
- // to store them as structured metadata or drop them altogether.
+type OTLPAttributeReference struct {
+ // Name contains either a verbatim name of an attribute or a regular expression matching many attributes.
//
- // +optional
- // +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Scope Attributes"
- ScopeAttributes []OTLPAttributesSpec `json:"scopeAttributes,omitempty"`
+ // +required
+ // +kubebuilder:validation:Required
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Name"
+ Name string `json:"name"`
- // LogAttributes contains the configuration for log attributes
- // to store them as structured metadata or drop them altogether.
+ // If Regex is true, then Name is treated as a regular expression instead of as a verbatim attribute name.
//
// +optional
// +kubebuilder:validation:Optional
- // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Log Attributes"
- LogAttributes []OTLPAttributesSpec `json:"logAttributes,omitempty"`
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Treat name as regular expression"
+ Regex bool `json:"regex,omitempty"`
}
// RetentionStreamSpec defines a log stream with separate retention time.
@@ -978,13 +946,14 @@ type LimitsTemplateSpec struct {
// +kubebuilder:validation:Optional
QueryLimits *QueryLimitSpec `json:"queries,omitempty"`
- // OTLP to configure which resource, scope and log attributes
- // to store as labels or structured metadata or drop them altogether
- // for all tenants.
+ // OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+ //
+ // Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ // enforce the use of some required attributes.
//
// +optional
// +kubebuilder:validation:Optional
- OTLP *GlobalOTLPSpec `json:"otlp,omitempty"`
+ OTLP *OTLPSpec `json:"otlp,omitempty"`
// Retention defines how long logs are kept in storage.
//
@@ -993,7 +962,7 @@ type LimitsTemplateSpec struct {
Retention *RetentionLimitSpec `json:"retention,omitempty"`
}
-// LimitsTemplateSpec defines the limits applied at ingestion or query path.
+// PerTenantLimitsTemplateSpec defines the limits applied at ingestion or query path.
type PerTenantLimitsTemplateSpec struct {
// IngestionLimits defines the limits applied on ingested log streams.
//
@@ -1007,9 +976,12 @@ type PerTenantLimitsTemplateSpec struct {
// +kubebuilder:validation:Optional
QueryLimits *PerTenantQueryLimitSpec `json:"queries,omitempty"`
- // OTLP to configure which resource, scope and log attributes
- // to store as labels or structured metadata or drop them altogether
- // for a single tenants.
+ // OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+ //
+ // Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ // enforce the use of some required attributes.
+ //
+ // The per-tenant configuration for OTLP attributes will be merged with the global configuration.
//
// +optional
// +kubebuilder:validation:Optional
@@ -1463,16 +1435,3 @@ func (t BlockedQueryTypes) String() string {
return strings.Join(res, ",")
}
-
-func (a OTLPAttributeAction) Value() string {
- switch a {
- case OTLPAttributeActionIndexLabel:
- return "index_label"
- case OTLPAttributeActionStructuredMetadata:
- return "structured_metadata"
- case OTLPAttributeActionDrop:
- return "drop"
- default:
- return string(a)
- }
-}
diff --git a/operator/apis/loki/v1/recordingrule_types.go b/operator/api/loki/v1/recordingrule_types.go
similarity index 100%
rename from operator/apis/loki/v1/recordingrule_types.go
rename to operator/api/loki/v1/recordingrule_types.go
diff --git a/operator/apis/loki/v1/rulerconfig_types.go b/operator/api/loki/v1/rulerconfig_types.go
similarity index 100%
rename from operator/apis/loki/v1/rulerconfig_types.go
rename to operator/api/loki/v1/rulerconfig_types.go
diff --git a/operator/apis/loki/v1/v1.go b/operator/api/loki/v1/v1.go
similarity index 88%
rename from operator/apis/loki/v1/v1.go
rename to operator/api/loki/v1/v1.go
index a17e7244dfb53..77ae0fa417ce8 100644
--- a/operator/apis/loki/v1/v1.go
+++ b/operator/api/loki/v1/v1.go
@@ -84,12 +84,12 @@ var (
// ErrIPv6InstanceAddrTypeNotAllowed when the default InstanceAddrType is used with enableIPv6.
ErrIPv6InstanceAddrTypeNotAllowed = errors.New(`instanceAddrType "default" cannot be used with enableIPv6 at the same time`)
- // ErrOTLPResourceAttributesEmptyNotAllowed when the OTLP ResourceAttributes are empty even though ignoreDefaults is enabled.
- ErrOTLPResourceAttributesEmptyNotAllowed = errors.New(`resourceAttributes cannot be empty when ignoreDefaults is true`)
- // ErrOTLPResourceAttributesIndexLabelActionMissing when OTLP ResourceAttributes does not contain at least one index label when ignoreDefaults is enabled.
- ErrOTLPResourceAttributesIndexLabelActionMissing = errors.New(`resourceAttributes does not contain at least one attributed mapped to "index_label"`)
- // ErrOTLPAttributesSpecInvalid when the OTLPAttributesSpec attibutes and regex fields are both empty.
- ErrOTLPAttributesSpecInvalid = errors.New(`attributes and regex cannot be empty at the same time`)
+ // ErrOTLPGlobalNoStreamLabel when the global OTLP configuration does not define at least one stream label.
+ ErrOTLPGlobalNoStreamLabel = errors.New("global OTLP configuration needs to define at least one stream label")
+ // ErrOTLPTenantMissing when a tenant is missing from the OTLP configuration although it has been defined in the tenancy.
+ ErrOTLPTenantMissing = errors.New("if no global OTLP configuration is present which defines at least one stream label, every tenant must have an OTLP configuration")
+ // ErrOTLPTenantNoStreamLabel when a tenant is defined but has no stream labels and there also no global stream labels.
+ ErrOTLPTenantNoStreamLabel = errors.New("if no global OTLP configuration is present which defines at least one stream label, every tenant must define at least one stream label")
// ErrRuleMustMatchNamespace indicates that an expression used in an alerting or recording rule is missing
// matchers for a namespace.
diff --git a/operator/apis/loki/v1/zz_generated.deepcopy.go b/operator/api/loki/v1/zz_generated.deepcopy.go
similarity index 96%
rename from operator/apis/loki/v1/zz_generated.deepcopy.go
rename to operator/api/loki/v1/zz_generated.deepcopy.go
index faab229b2a569..99c8caa49432b 100644
--- a/operator/apis/loki/v1/zz_generated.deepcopy.go
+++ b/operator/api/loki/v1/zz_generated.deepcopy.go
@@ -504,27 +504,6 @@ func (in *ClusterProxy) DeepCopy() *ClusterProxy {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlobalOTLPSpec) DeepCopyInto(out *GlobalOTLPSpec) {
- *out = *in
- if in.IndexedResourceAttributes != nil {
- in, out := &in.IndexedResourceAttributes, &out.IndexedResourceAttributes
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- in.OTLPSpec.DeepCopyInto(&out.OTLPSpec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalOTLPSpec.
-func (in *GlobalOTLPSpec) DeepCopy() *GlobalOTLPSpec {
- if in == nil {
- return nil
- }
- out := new(GlobalOTLPSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HashRingSpec) DeepCopyInto(out *HashRingSpec) {
*out = *in
@@ -602,7 +581,7 @@ func (in *LimitsTemplateSpec) DeepCopyInto(out *LimitsTemplateSpec) {
}
if in.OTLP != nil {
in, out := &in.OTLP, &out.OTLP
- *out = new(GlobalOTLPSpec)
+ *out = new(OTLPSpec)
(*in).DeepCopyInto(*out)
}
if in.Retention != nil {
@@ -1084,97 +1063,91 @@ func (in *OPASpec) DeepCopy() *OPASpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OTLPAttributesSpec) DeepCopyInto(out *OTLPAttributesSpec) {
+func (in *OTLPAttributeReference) DeepCopyInto(out *OTLPAttributeReference) {
*out = *in
- if in.Attributes != nil {
- in, out := &in.Attributes, &out.Attributes
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPAttributesSpec.
-func (in *OTLPAttributesSpec) DeepCopy() *OTLPAttributesSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPAttributeReference.
+func (in *OTLPAttributeReference) DeepCopy() *OTLPAttributeReference {
if in == nil {
return nil
}
- out := new(OTLPAttributesSpec)
+ out := new(OTLPAttributeReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OTLPResourceAttributesConfigSpec) DeepCopyInto(out *OTLPResourceAttributesConfigSpec) {
+func (in *OTLPMetadataSpec) DeepCopyInto(out *OTLPMetadataSpec) {
*out = *in
- if in.Attributes != nil {
- in, out := &in.Attributes, &out.Attributes
- *out = make([]string, len(*in))
+ if in.ResourceAttributes != nil {
+ in, out := &in.ResourceAttributes, &out.ResourceAttributes
+ *out = make([]OTLPAttributeReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.ScopeAttributes != nil {
+ in, out := &in.ScopeAttributes, &out.ScopeAttributes
+ *out = make([]OTLPAttributeReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.LogAttributes != nil {
+ in, out := &in.LogAttributes, &out.LogAttributes
+ *out = make([]OTLPAttributeReference, len(*in))
copy(*out, *in)
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPResourceAttributesConfigSpec.
-func (in *OTLPResourceAttributesConfigSpec) DeepCopy() *OTLPResourceAttributesConfigSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPMetadataSpec.
+func (in *OTLPMetadataSpec) DeepCopy() *OTLPMetadataSpec {
if in == nil {
return nil
}
- out := new(OTLPResourceAttributesConfigSpec)
+ out := new(OTLPMetadataSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OTLPResourceAttributesSpec) DeepCopyInto(out *OTLPResourceAttributesSpec) {
+func (in *OTLPSpec) DeepCopyInto(out *OTLPSpec) {
*out = *in
- if in.Attributes != nil {
- in, out := &in.Attributes, &out.Attributes
- *out = make([]OTLPResourceAttributesConfigSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ if in.StreamLabels != nil {
+ in, out := &in.StreamLabels, &out.StreamLabels
+ *out = new(OTLPStreamLabelSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.StructuredMetadata != nil {
+ in, out := &in.StructuredMetadata, &out.StructuredMetadata
+ *out = new(OTLPMetadataSpec)
+ (*in).DeepCopyInto(*out)
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPResourceAttributesSpec.
-func (in *OTLPResourceAttributesSpec) DeepCopy() *OTLPResourceAttributesSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPSpec.
+func (in *OTLPSpec) DeepCopy() *OTLPSpec {
if in == nil {
return nil
}
- out := new(OTLPResourceAttributesSpec)
+ out := new(OTLPSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OTLPSpec) DeepCopyInto(out *OTLPSpec) {
+func (in *OTLPStreamLabelSpec) DeepCopyInto(out *OTLPStreamLabelSpec) {
*out = *in
if in.ResourceAttributes != nil {
in, out := &in.ResourceAttributes, &out.ResourceAttributes
- *out = new(OTLPResourceAttributesSpec)
- (*in).DeepCopyInto(*out)
- }
- if in.ScopeAttributes != nil {
- in, out := &in.ScopeAttributes, &out.ScopeAttributes
- *out = make([]OTLPAttributesSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.LogAttributes != nil {
- in, out := &in.LogAttributes, &out.LogAttributes
- *out = make([]OTLPAttributesSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ *out = make([]OTLPAttributeReference, len(*in))
+ copy(*out, *in)
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPSpec.
-func (in *OTLPSpec) DeepCopy() *OTLPSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OTLPStreamLabelSpec.
+func (in *OTLPStreamLabelSpec) DeepCopy() *OTLPStreamLabelSpec {
if in == nil {
return nil
}
- out := new(OTLPSpec)
+ out := new(OTLPStreamLabelSpec)
in.DeepCopyInto(out)
return out
}
@@ -1251,6 +1224,21 @@ func (in *ObjectStorageTLSSpec) DeepCopy() *ObjectStorageTLSSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenshiftOTLPConfig) DeepCopyInto(out *OpenshiftOTLPConfig) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftOTLPConfig.
+func (in *OpenshiftOTLPConfig) DeepCopy() *OpenshiftOTLPConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenshiftOTLPConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenshiftTenantSpec) DeepCopyInto(out *OpenshiftTenantSpec) {
*out = *in
@@ -1259,6 +1247,11 @@ func (in *OpenshiftTenantSpec) DeepCopyInto(out *OpenshiftTenantSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.OTLP != nil {
+ in, out := &in.OTLP, &out.OTLP
+ *out = new(OpenshiftOTLPConfig)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftTenantSpec.
diff --git a/operator/apis/loki/v1beta1/alertingrule_types.go b/operator/api/loki/v1beta1/alertingrule_types.go
similarity index 99%
rename from operator/apis/loki/v1beta1/alertingrule_types.go
rename to operator/api/loki/v1beta1/alertingrule_types.go
index 9a51320f788d4..15d9c6afd02cc 100644
--- a/operator/apis/loki/v1beta1/alertingrule_types.go
+++ b/operator/api/loki/v1beta1/alertingrule_types.go
@@ -1,7 +1,7 @@
package v1beta1
import (
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/conversion"
)
diff --git a/operator/apis/loki/v1beta1/alertingrule_types_test.go b/operator/api/loki/v1beta1/alertingrule_types_test.go
similarity index 98%
rename from operator/apis/loki/v1beta1/alertingrule_types_test.go
rename to operator/api/loki/v1beta1/alertingrule_types_test.go
index 373e7d8e71e00..e21f95e2380e8 100644
--- a/operator/apis/loki/v1beta1/alertingrule_types_test.go
+++ b/operator/api/loki/v1beta1/alertingrule_types_test.go
@@ -3,8 +3,8 @@ package v1beta1_test
import (
"testing"
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/api/loki/v1beta1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/operator/apis/loki/v1beta1/doc.go b/operator/api/loki/v1beta1/doc.go
similarity index 100%
rename from operator/apis/loki/v1beta1/doc.go
rename to operator/api/loki/v1beta1/doc.go
diff --git a/operator/apis/loki/v1beta1/groupversion_info.go b/operator/api/loki/v1beta1/groupversion_info.go
similarity index 100%
rename from operator/apis/loki/v1beta1/groupversion_info.go
rename to operator/api/loki/v1beta1/groupversion_info.go
diff --git a/operator/apis/loki/v1beta1/lokistack_types.go b/operator/api/loki/v1beta1/lokistack_types.go
similarity index 99%
rename from operator/apis/loki/v1beta1/lokistack_types.go
rename to operator/api/loki/v1beta1/lokistack_types.go
index cd7fb0c90cff5..e37436d213eda 100644
--- a/operator/apis/loki/v1beta1/lokistack_types.go
+++ b/operator/api/loki/v1beta1/lokistack_types.go
@@ -1,7 +1,7 @@
package v1beta1
import (
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/conversion"
diff --git a/operator/apis/loki/v1beta1/lokistack_types_test.go b/operator/api/loki/v1beta1/lokistack_types_test.go
similarity index 99%
rename from operator/apis/loki/v1beta1/lokistack_types_test.go
rename to operator/api/loki/v1beta1/lokistack_types_test.go
index 28ffb1763137d..33477d1b89024 100644
--- a/operator/apis/loki/v1beta1/lokistack_types_test.go
+++ b/operator/api/loki/v1beta1/lokistack_types_test.go
@@ -7,8 +7,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/api/loki/v1beta1"
)
func TestConvertToV1_LokiStack(t *testing.T) {
diff --git a/operator/apis/loki/v1beta1/recordingrule_types.go b/operator/api/loki/v1beta1/recordingrule_types.go
similarity index 99%
rename from operator/apis/loki/v1beta1/recordingrule_types.go
rename to operator/api/loki/v1beta1/recordingrule_types.go
index e6d7f63ddd3ab..348ca3703f59d 100644
--- a/operator/apis/loki/v1beta1/recordingrule_types.go
+++ b/operator/api/loki/v1beta1/recordingrule_types.go
@@ -1,7 +1,7 @@
package v1beta1
import (
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/conversion"
)
diff --git a/operator/apis/loki/v1beta1/recordingrule_types_test.go b/operator/api/loki/v1beta1/recordingrule_types_test.go
similarity index 98%
rename from operator/apis/loki/v1beta1/recordingrule_types_test.go
rename to operator/api/loki/v1beta1/recordingrule_types_test.go
index d2a8ea7df83ab..6c8f494699571 100644
--- a/operator/apis/loki/v1beta1/recordingrule_types_test.go
+++ b/operator/api/loki/v1beta1/recordingrule_types_test.go
@@ -3,8 +3,8 @@ package v1beta1_test
import (
"testing"
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/api/loki/v1beta1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/operator/apis/loki/v1beta1/rulerconfig_types.go b/operator/api/loki/v1beta1/rulerconfig_types.go
similarity index 99%
rename from operator/apis/loki/v1beta1/rulerconfig_types.go
rename to operator/api/loki/v1beta1/rulerconfig_types.go
index c7bd9b4e7e812..420c9fb617021 100644
--- a/operator/apis/loki/v1beta1/rulerconfig_types.go
+++ b/operator/api/loki/v1beta1/rulerconfig_types.go
@@ -1,7 +1,7 @@
package v1beta1
import (
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/conversion"
)
diff --git a/operator/apis/loki/v1beta1/rulerconfig_types_test.go b/operator/api/loki/v1beta1/rulerconfig_types_test.go
similarity index 99%
rename from operator/apis/loki/v1beta1/rulerconfig_types_test.go
rename to operator/api/loki/v1beta1/rulerconfig_types_test.go
index ab79fc35e0e58..239e43027c887 100644
--- a/operator/apis/loki/v1beta1/rulerconfig_types_test.go
+++ b/operator/api/loki/v1beta1/rulerconfig_types_test.go
@@ -3,8 +3,8 @@ package v1beta1_test
import (
"testing"
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/apis/loki/v1beta1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/api/loki/v1beta1"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
diff --git a/operator/apis/loki/v1beta1/v1beta1.go b/operator/api/loki/v1beta1/v1beta1.go
similarity index 100%
rename from operator/apis/loki/v1beta1/v1beta1.go
rename to operator/api/loki/v1beta1/v1beta1.go
diff --git a/operator/apis/loki/v1beta1/zz_generated.deepcopy.go b/operator/api/loki/v1beta1/zz_generated.deepcopy.go
similarity index 100%
rename from operator/apis/loki/v1beta1/zz_generated.deepcopy.go
rename to operator/api/loki/v1beta1/zz_generated.deepcopy.go
diff --git a/operator/bundle/community-openshift/bundle.Dockerfile b/operator/bundle/community-openshift/bundle.Dockerfile
index d83b40681db86..3be10ce151140 100644
--- a/operator/bundle/community-openshift/bundle.Dockerfile
+++ b/operator/bundle/community-openshift/bundle.Dockerfile
@@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=alpha
LABEL operators.operatorframework.io.bundle.channel.default.v1=alpha
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
+LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4
# Labels for testing.
LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index 4ad8e058e5fbe..6fa468cf99960 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.2
- createdAt: "2024-10-14T10:09:32Z"
+ createdAt: "2024-10-23T18:05:45Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
features.operators.openshift.io/disconnected: "true"
@@ -161,7 +161,7 @@ metadata:
features.operators.openshift.io/token-auth-azure: "true"
features.operators.openshift.io/token-auth-gcp: "false"
operators.operatorframework.io/builder: operator-sdk-unknown
- operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io/project_layout: go.kubebuilder.io/v4
repository: https://github.com/grafana/loki/tree/main/operator
support: Grafana Loki SIG Operator
labels:
@@ -185,8 +185,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -208,14 +209,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -272,17 +274,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -301,9 +307,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -313,26 +320,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -343,8 +354,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -361,84 +373,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -456,9 +468,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -468,26 +481,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -498,8 +515,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -516,61 +534,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -597,20 +616,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -625,8 +647,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -653,9 +676,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -668,9 +691,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -685,11 +708,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -729,13 +754,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -755,8 +782,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -769,8 +797,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -783,8 +812,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -797,8 +827,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -811,8 +842,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -825,8 +857,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -839,8 +872,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -853,8 +887,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -878,13 +913,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -895,13 +932,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -957,12 +996,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -1026,8 +1092,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1043,9 +1110,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1148,9 +1216,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1170,9 +1239,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1205,21 +1274,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1300,9 +1371,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1322,9 +1394,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1357,21 +1429,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1433,21 +1507,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1777,7 +1853,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.2.0
+ value: docker.io/grafana/loki:3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1901,7 +1977,7 @@ spec:
provider:
name: Grafana Loki SIG Operator
relatedImages:
- - image: docker.io/grafana/loki:3.2.0
+ - image: docker.io/grafana/loki:3.2.1
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
index d1c01b5cb082f..d7971f0cda671 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -165,125 +165,101 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for all tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration when no custom OTLP configuration is set, or even
+ enforce the use of some required attributes.
properties:
- indexedResourceAttributes:
- description: |-
- IndexedResourceAttributes contains the global configuration for resource attributes
- to store them as index labels.
- items:
- type: string
- type: array
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource attributes
+ are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ resource attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of scope
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -363,7 +339,7 @@ spec:
type: object
tenants:
additionalProperties:
- description: LimitsTemplateSpec defines the limits applied
+ description: PerTenantLimitsTemplateSpec defines the limits applied
at ingestion or query path.
properties:
ingestion:
@@ -430,118 +406,103 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for a single tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration when no custom OTLP configuration is set, or even
+ enforce the use of some required attributes.
+
+ The per-tenant configuration for OTLP attributes will be merged with the global configuration.
properties:
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource
+ attributes are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of resource attributes that should be included
+ in structured metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of
+ scope attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -830,6 +791,7 @@ spec:
out sizes.
enum:
- 1x.demo
+ - 1x.pico
- 1x.extra-small
- 1x.small
- 1x.medium
@@ -3903,6 +3865,26 @@ spec:
items:
type: string
type: array
+ otlp:
+ description: OTLP contains settings for ingesting data using
+ OTLP in the OpenShift tenancy mode.
+ properties:
+ disableRecommendedAttributes:
+ description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ type: boolean
+ type: object
type: object
required:
- mode
diff --git a/operator/bundle/community-openshift/metadata/annotations.yaml b/operator/bundle/community-openshift/metadata/annotations.yaml
index f17f3ab1caacc..4c99b3aa08786 100644
--- a/operator/bundle/community-openshift/metadata/annotations.yaml
+++ b/operator/bundle/community-openshift/metadata/annotations.yaml
@@ -8,7 +8,7 @@ annotations:
operators.operatorframework.io.bundle.channel.default.v1: alpha
operators.operatorframework.io.metrics.builder: operator-sdk-unknown
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
- operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4
# Annotations for testing.
operators.operatorframework.io.test.mediatype.v1: scorecard+v1
diff --git a/operator/bundle/community/bundle.Dockerfile b/operator/bundle/community/bundle.Dockerfile
index d83b40681db86..3be10ce151140 100644
--- a/operator/bundle/community/bundle.Dockerfile
+++ b/operator/bundle/community/bundle.Dockerfile
@@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=alpha
LABEL operators.operatorframework.io.bundle.channel.default.v1=alpha
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
+LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4
# Labels for testing.
LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index e185519eee975..83b91072bb5cc 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,11 +150,11 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.2
- createdAt: "2024-10-14T10:09:30Z"
+ createdAt: "2024-10-23T18:05:43Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
- operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io/project_layout: go.kubebuilder.io/v4
repository: https://github.com/grafana/loki/tree/main/operator
support: Grafana Loki SIG Operator
labels:
@@ -178,8 +178,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -201,14 +202,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -265,17 +267,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -294,9 +300,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the maximum
+ logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -306,26 +313,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -336,8 +347,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -354,84 +366,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -449,9 +461,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the maximum
+ logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -461,26 +474,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -491,8 +508,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -509,61 +527,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -590,20 +609,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -618,8 +640,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -646,9 +669,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -661,9 +684,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -678,11 +701,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -722,13 +747,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -748,8 +775,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -762,8 +790,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -776,8 +805,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -790,8 +820,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -804,8 +835,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -818,8 +850,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -832,8 +865,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -846,8 +880,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -871,13 +906,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -888,13 +925,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -950,12 +989,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -1019,8 +1085,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1036,9 +1103,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1141,9 +1209,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1163,9 +1232,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1198,21 +1267,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1293,9 +1364,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1315,9 +1387,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1350,21 +1422,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1426,21 +1500,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1757,7 +1833,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.2.0
+ value: docker.io/grafana/loki:3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1869,7 +1945,7 @@ spec:
provider:
name: Grafana Loki SIG Operator
relatedImages:
- - image: docker.io/grafana/loki:3.2.0
+ - image: docker.io/grafana/loki:3.2.1
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
index b21ccf6f5e586..1ebd338f050b4 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml
@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
+ cert-manager.io/inject-ca-from: loki-operator/loki-operator-serving-cert
controller-gen.kubebuilder.io/version: v0.16.3
creationTimestamp: null
labels:
diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
index 32a8561f742e4..0f5fdb2313635 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
+ cert-manager.io/inject-ca-from: loki-operator/loki-operator-serving-cert
controller-gen.kubebuilder.io/version: v0.16.3
creationTimestamp: null
labels:
@@ -165,125 +166,101 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for all tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
properties:
- indexedResourceAttributes:
- description: |-
- IndexedResourceAttributes contains the global configuration for resource attributes
- to store them as index labels.
- items:
- type: string
- type: array
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource attributes
+ are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ resource attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of scope
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -363,7 +340,7 @@ spec:
type: object
tenants:
additionalProperties:
- description: LimitsTemplateSpec defines the limits applied
+ description: PerTenantLimitsTemplateSpec defines the limits applied
at ingestion or query path.
properties:
ingestion:
@@ -430,118 +407,103 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for a single tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
+
+ The per-tenant configuration for OTLP attributes will be merged with the global configuration.
properties:
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource
+ attributes are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of resource attributes that should be included
+ in structured metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of
+ scope attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -830,6 +792,7 @@ spec:
out sizes.
enum:
- 1x.demo
+ - 1x.pico
- 1x.extra-small
- 1x.small
- 1x.medium
@@ -3903,6 +3866,26 @@ spec:
items:
type: string
type: array
+ otlp:
+ description: OTLP contains settings for ingesting data using
+ OTLP in the OpenShift tenancy mode.
+ properties:
+ disableRecommendedAttributes:
+ description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ type: boolean
+ type: object
type: object
required:
- mode
diff --git a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
index 5b0fed1309eac..eb401e5ed9e2a 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml
@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
+ cert-manager.io/inject-ca-from: loki-operator/loki-operator-serving-cert
controller-gen.kubebuilder.io/version: v0.16.3
creationTimestamp: null
labels:
diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
index 4ab8c7e73574f..1ec2363bee213 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
+ cert-manager.io/inject-ca-from: loki-operator/loki-operator-serving-cert
controller-gen.kubebuilder.io/version: v0.16.3
creationTimestamp: null
labels:
diff --git a/operator/bundle/community/metadata/annotations.yaml b/operator/bundle/community/metadata/annotations.yaml
index f17f3ab1caacc..4c99b3aa08786 100644
--- a/operator/bundle/community/metadata/annotations.yaml
+++ b/operator/bundle/community/metadata/annotations.yaml
@@ -8,7 +8,7 @@ annotations:
operators.operatorframework.io.bundle.channel.default.v1: alpha
operators.operatorframework.io.metrics.builder: operator-sdk-unknown
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
- operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4
# Annotations for testing.
operators.operatorframework.io.test.mediatype.v1: scorecard+v1
diff --git a/operator/bundle/openshift/bundle.Dockerfile b/operator/bundle/openshift/bundle.Dockerfile
index cddaca7779303..70e27a124f05e 100644
--- a/operator/bundle/openshift/bundle.Dockerfile
+++ b/operator/bundle/openshift/bundle.Dockerfile
@@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=stable
LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown
LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
+LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4
# Labels for testing.
LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index d224b499b4724..339b65f1f912b 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:0.1.0
- createdAt: "2024-10-14T10:09:33Z"
+ createdAt: "2024-10-23T18:05:47Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -173,7 +173,7 @@ metadata:
operators.openshift.io/valid-subscription: '["OpenShift Container Platform", "OpenShift
Platform Plus"]'
operators.operatorframework.io/builder: operator-sdk-unknown
- operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io/project_layout: go.kubebuilder.io/v4
support: AOS Cluster Logging
labels:
operatorframework.io/arch.amd64: supported
@@ -198,8 +198,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -221,14 +222,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -285,17 +287,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -314,9 +320,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -326,26 +333,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -356,8 +367,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -374,84 +386,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -469,9 +481,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -481,26 +494,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -511,8 +528,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -529,61 +547,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -610,20 +629,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -638,8 +660,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -666,9 +689,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -681,9 +704,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -698,11 +721,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -742,13 +767,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -768,8 +795,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -782,8 +810,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -796,8 +825,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -810,8 +840,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -824,8 +855,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -838,8 +870,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -852,8 +885,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -866,8 +900,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -891,13 +926,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -908,13 +945,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -970,12 +1009,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ usecase.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -1039,8 +1105,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1056,9 +1123,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1161,9 +1229,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1183,9 +1252,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1218,21 +1287,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1313,9 +1384,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1335,9 +1407,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1370,21 +1442,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1446,21 +1520,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1762,7 +1838,7 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v3.2.0
+ value: quay.io/openshift-logging/loki:v3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
@@ -1886,7 +1962,7 @@ spec:
provider:
name: Red Hat
relatedImages:
- - image: quay.io/openshift-logging/loki:v3.2.0
+ - image: quay.io/openshift-logging/loki:v3.2.1
name: loki
- image: quay.io/observatorium/api:latest
name: gateway
diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
index 6651f96453e26..277350061c87f 100644
--- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -165,125 +165,101 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for all tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
properties:
- indexedResourceAttributes:
- description: |-
- IndexedResourceAttributes contains the global configuration for resource attributes
- to store them as index labels.
- items:
- type: string
- type: array
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource attributes
+ are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ resource attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of scope
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -363,7 +339,7 @@ spec:
type: object
tenants:
additionalProperties:
- description: LimitsTemplateSpec defines the limits applied
+ description: PerTenantLimitsTemplateSpec defines the limits applied
at ingestion or query path.
properties:
ingestion:
@@ -430,118 +406,103 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for a single tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
+
+ The per-tenant configuration for OTLP attributes will be merged with the global configuration.
properties:
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource
+ attributes are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of resource attributes that should be included
+ in structured metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of
+ scope attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -830,6 +791,7 @@ spec:
out sizes.
enum:
- 1x.demo
+ - 1x.pico
- 1x.extra-small
- 1x.small
- 1x.medium
@@ -3903,6 +3865,26 @@ spec:
items:
type: string
type: array
+ otlp:
+ description: OTLP contains settings for ingesting data using
+ OTLP in the OpenShift tenancy mode.
+ properties:
+ disableRecommendedAttributes:
+ description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+                    use case.
+ type: boolean
+ type: object
type: object
required:
- mode
diff --git a/operator/bundle/openshift/metadata/annotations.yaml b/operator/bundle/openshift/metadata/annotations.yaml
index 6d1990b6762a3..ecc572548242f 100644
--- a/operator/bundle/openshift/metadata/annotations.yaml
+++ b/operator/bundle/openshift/metadata/annotations.yaml
@@ -8,7 +8,7 @@ annotations:
operators.operatorframework.io.bundle.channel.default.v1: stable
operators.operatorframework.io.metrics.builder: operator-sdk-unknown
operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
- operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
+ operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4
# Annotations for testing.
operators.operatorframework.io.test.mediatype.v1: scorecard+v1
diff --git a/operator/calculator.Dockerfile b/operator/calculator.Dockerfile
index e1ab9861ed7de..96286dcde1294 100644
--- a/operator/calculator.Dockerfile
+++ b/operator/calculator.Dockerfile
@@ -1,9 +1,9 @@
# Build the calculator binary
-FROM golang:1.22.6 as builder
+FROM golang:1.22.8 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
-COPY apis/ apis/
+COPY api/ api/
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index 5df903dd5e9ee..0e1c638620e71 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -12,8 +12,8 @@ import (
openshiftv1 "github.com/openshift/api/config/v1"
"sigs.k8s.io/yaml"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/main.go b/operator/cmd/loki-operator/main.go
similarity index 96%
rename from operator/main.go
rename to operator/cmd/loki-operator/main.go
index fdefa6f663a06..a80bef34aba48 100644
--- a/operator/main.go
+++ b/operator/cmd/loki-operator/main.go
@@ -17,11 +17,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/healthz"
runtimemetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
- ctrlconfigv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- lokiv1beta1 "github.com/grafana/loki/operator/apis/loki/v1beta1"
- lokictrl "github.com/grafana/loki/operator/controllers/loki"
+ ctrlconfigv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ lokiv1beta1 "github.com/grafana/loki/operator/api/loki/v1beta1"
"github.com/grafana/loki/operator/internal/config"
+ lokictrl "github.com/grafana/loki/operator/internal/controller/loki"
"github.com/grafana/loki/operator/internal/metrics"
"github.com/grafana/loki/operator/internal/operator"
"github.com/grafana/loki/operator/internal/validation"
diff --git a/operator/config/certmanager/certificate.yaml b/operator/config/certmanager/certificate.yaml
index d7c5227840ecf..c484bd7d21d77 100644
--- a/operator/config/certmanager/certificate.yaml
+++ b/operator/config/certmanager/certificate.yaml
@@ -15,10 +15,10 @@ metadata:
name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
namespace: system
spec:
- # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
+ # SERVICE_NAME_PLACEHOLDER and SERVICE_NAMESPACE_PLACEHOLDER will be substituted by kustomize
dnsNames:
- - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
+ - SERVICE_NAME_PLACEHOLDER.SERVICE_NAMESPACE_PLACEHOLDER.svc
+ - SERVICE_NAME_PLACEHOLDER.SERVICE_NAMESPACE_PLACEHOLDER.svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index 6df041245f152..02dbdf73a3927 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -147,125 +147,101 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for all tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
properties:
- indexedResourceAttributes:
- description: |-
- IndexedResourceAttributes contains the global configuration for resource attributes
- to store them as index labels.
- items:
- type: string
- type: array
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource attributes
+ are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names of
+ resource attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of scope
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead of
+ as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -345,7 +321,7 @@ spec:
type: object
tenants:
additionalProperties:
- description: LimitsTemplateSpec defines the limits applied
+ description: PerTenantLimitsTemplateSpec defines the limits applied
at ingestion or query path.
properties:
ingestion:
@@ -412,118 +388,103 @@ spec:
type: object
otlp:
description: |-
- OTLP to configure which resource, scope and log attributes
- to store as labels or structured metadata or drop them altogether
- for a single tenants.
+ OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+
+ Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+ enforce the use of some required attributes.
+
+ The per-tenant configuration for OTLP attributes will be merged with the global configuration.
properties:
- logAttributes:
- description: |-
- LogAttributes contains the configuration for log attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
- resourceAttributes:
- description: |-
- ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ streamLabels:
+ description: StreamLabels configures which resource
+ attributes are converted to Loki stream labels.
properties:
- attributes:
- description: |-
- Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of the resource attributes that should be converted
+ into Loki stream labels.
items:
- description: |-
- OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
- to store them as index labels or structured metadata or drop them altogether.
properties:
- action:
- description: |-
- Action defines the indexing action for the selected resoure attributes. They
- can be either indexed as labels, added to structured metadata or drop altogether.
- enum:
- - index_label
- - structured_metadata
- - drop
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
- attributes:
- description: |-
- Attributes is the list of attributes to configure indexing or drop them
- altogether.
- items:
- type: string
- type: array
regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ structuredMetadata:
+ description: StructuredMetadata configures which attributes
+ are saved in structured metadata.
+ properties:
+ logAttributes:
+ description: LogAttributes lists the names of log
+ attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
required:
- - action
+ - name
+ type: object
+ type: array
+ resourceAttributes:
+ description: ResourceAttributes lists the names
+ of resource attributes that should be included
+ in structured metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
+ type: object
+ type: array
+ scopeAttributes:
+ description: ScopeAttributes lists the names of
+ scope attributes that should be included in structured
+ metadata.
+ items:
+ properties:
+ name:
+ description: Name contains either a verbatim
+ name of an attribute or a regular expression
+ matching many attributes.
+ type: string
+ regex:
+ description: If Regex is true, then Name is
+ treated as a regular expression instead
+ of as a verbatim attribute name.
+ type: boolean
+ required:
+ - name
type: object
type: array
- ignoreDefaults:
- description: |-
- IgnoreDefaults controls whether to ignore the global configuration for resource attributes
- indexed as labels.
-
- If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
- type: boolean
type: object
- scopeAttributes:
- description: |-
- ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
- items:
- description: |-
- OTLPAttributesSpec contains the configuration for a set of attributes
- to store them as index labels or structured metadata or drop them altogether.
- properties:
- action:
- description: |-
- Action defines the indexing action for the selected attributes. They
- can be either added to structured metadata or drop altogether.
- enum:
- - structured_metadata
- - drop
- type: string
- attributes:
- description: Attributes allows choosing the attributes
- by listing their names.
- items:
- type: string
- type: array
- regex:
- description: Regex allows choosing the attributes
- by matching a regular expression.
- type: string
- required:
- - action
- type: object
- type: array
type: object
queries:
description: QueryLimits defines the limit applied on querying
@@ -812,6 +773,7 @@ spec:
out sizes.
enum:
- 1x.demo
+ - 1x.pico
- 1x.extra-small
- 1x.small
- 1x.medium
@@ -3885,6 +3847,26 @@ spec:
items:
type: string
type: array
+ otlp:
+ description: OTLP contains settings for ingesting data using
+ OTLP in the OpenShift tenancy mode.
+ properties:
+ disableRecommendedAttributes:
+ description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ type: boolean
+ type: object
type: object
required:
- mode
diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml
index 19e6c0879325b..c2d8ce484b9ee 100644
--- a/operator/config/crd/kustomization.yaml
+++ b/operator/config/crd/kustomization.yaml
@@ -1,3 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
@@ -8,13 +11,8 @@ resources:
- bases/loki.grafana.com_rulerconfigs.yaml
# +kubebuilder:scaffold:crdkustomizeresource
-patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
-- patches/webhook_in_lokistacks.yaml
-- patches/webhook_in_alertingrules.yaml
-- patches/webhook_in_recordingrules.yaml
-- patches/webhook_in_rulerconfigs.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
@@ -28,3 +26,9 @@ patchesStrategicMerge:
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
+
+patches:
+- path: patches/webhook_in_lokistacks.yaml
+- path: patches/webhook_in_alertingrules.yaml
+- path: patches/webhook_in_recordingrules.yaml
+- path: patches/webhook_in_rulerconfigs.yaml
diff --git a/operator/config/docs/config.json b/operator/config/docs/config.json
index fb1b7d8a11b2b..c646667f25840 100644
--- a/operator/config/docs/config.json
+++ b/operator/config/docs/config.json
@@ -30,15 +30,15 @@
"docsURLTemplate": "https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1#JSON"
},
{
- "typeMatchPrefix": "^github\\.com/grafana/loki/operator/apis/loki/v1",
+ "typeMatchPrefix": "^github\\.com/grafana/loki/operator/api/loki/v1",
"docsURLTemplate": "../v1/api.md#loki.grafana.com/v1.{{ .TypeIdentifier}}"
},
{
- "typeMatchPrefix": "^github\\.com/grafana/loki/operator/apis/loki/v1beta1",
+ "typeMatchPrefix": "^github\\.com/grafana/loki/operator/api/loki/v1beta1",
"docsURLTemplate": "../v1beta1/api.md#loki-grafana.com-v1beta1-{{ .TypeIdentifier}}"
},
{
- "typeMatchPrefix": "^github\\.com/grafana/loki/operator/apis/loki/config/v1",
+ "typeMatchPrefix": "^github\\.com/grafana/loki/operator/api/loki/config/v1",
"docsURLTemplate": "../v1/feature-gates.md#loki-grafana-com-v1-{{ .TypeIdentifier}}"
},
{
@@ -51,9 +51,9 @@
"k8s.io/apimachinery/pkg/apis/": "Kubernetes ",
"k8s.io/component-base/config/": "Kubernetes ",
"sigs.k8s.io/controller-runtime/pkg/config/": "K8S Controller-runtime ",
- "github.com/grafana/loki/operator/apis/loki/v1": "Loki Operator v1",
- "github.com/grafana/loki/operator/apis/loki/v1beta1": "Loki Operator v1beta1",
- "github.com/grafana/loki/operator/apis/loki/config/v1": "Feature Gates"
+ "github.com/grafana/loki/operator/api/loki/v1": "Loki Operator v1",
+ "github.com/grafana/loki/operator/api/loki/v1beta1": "Loki Operator v1beta1",
+ "github.com/grafana/loki/operator/api/loki/config/v1": "Feature Gates"
},
"markdownDisabled": false
}
diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
index 7e7e67ad51aab..78710b48121de 100644
--- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -40,8 +40,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -63,14 +64,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -98,8 +100,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -121,14 +124,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -185,17 +189,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -214,9 +222,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -226,26 +235,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -256,8 +269,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -274,84 +288,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -369,9 +383,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -381,26 +396,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -411,8 +430,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -429,61 +449,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -510,20 +531,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -538,8 +562,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -566,9 +591,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -581,9 +606,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -598,11 +623,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -642,13 +669,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -668,8 +697,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -682,8 +712,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -696,8 +727,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -710,8 +742,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -724,8 +757,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -738,8 +772,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -752,8 +787,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -766,8 +802,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -791,13 +828,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -808,13 +847,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -870,12 +911,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -969,9 +1037,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -981,26 +1050,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -1011,20 +1084,23 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -1032,9 +1108,10 @@ spec:
- description: Tenants defines the limits and overrides applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -1044,26 +1121,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -1074,26 +1155,30 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -1114,11 +1199,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -1156,8 +1243,9 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -1359,8 +1447,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1376,9 +1465,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1410,8 +1500,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1427,9 +1518,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: The name of the time series to output to. Must be a valid metric
@@ -1529,9 +1621,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1551,9 +1644,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1586,21 +1679,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1681,9 +1776,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1703,9 +1799,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1738,21 +1834,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1814,21 +1912,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1968,9 +2068,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1990,9 +2091,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2025,21 +2126,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -2117,9 +2220,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -2139,9 +2243,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2174,21 +2278,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -2250,21 +2356,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index c1ee0008faa43..77760c0c6633c 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -33,8 +33,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -56,14 +57,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -91,8 +93,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -114,14 +117,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -178,17 +182,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -207,9 +215,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -219,26 +228,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -249,8 +262,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -267,84 +281,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -362,9 +376,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -374,26 +389,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -404,8 +423,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -422,61 +442,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -503,20 +524,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -531,8 +555,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -559,9 +584,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -574,9 +599,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -591,11 +616,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -635,13 +662,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -661,8 +690,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -675,8 +705,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -689,8 +720,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -703,8 +735,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -717,8 +750,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -731,8 +765,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -745,8 +780,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -759,8 +795,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -784,13 +821,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -801,13 +840,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -863,12 +904,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ use case.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -962,9 +1030,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -974,26 +1043,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -1004,20 +1077,23 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -1025,9 +1101,10 @@ spec:
- description: Tenants defines the limits and overrides applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -1037,26 +1114,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -1067,26 +1148,30 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -1107,11 +1192,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -1149,8 +1236,9 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -1352,8 +1440,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1369,9 +1458,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1403,8 +1493,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1420,9 +1511,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: The name of the time series to output to. Must be a valid metric
@@ -1522,9 +1614,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1544,9 +1637,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1579,21 +1672,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1674,9 +1769,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1696,9 +1792,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1731,21 +1827,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1807,21 +1905,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1961,9 +2061,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1983,9 +2084,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2018,21 +2119,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -2110,9 +2213,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -2132,9 +2236,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2167,21 +2271,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -2243,21 +2349,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
index b841ead1147aa..66bcc30f2b524 100644
--- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -52,8 +52,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -75,14 +76,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -110,8 +112,9 @@ spec:
- description: List of groups for alerting rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given alerting rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ alerting rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of alerts an alerting rule can produce.
@@ -133,14 +136,15 @@ spec:
- description: Annotations to add to each alert.
displayName: Annotations
path: groups[0].rules[0].annotations
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- - description: Alerts are considered firing once they have been returned for
- this long. Alerts which have not yet fired for long enough are considered
- pending.
+ - description: |-
+ Alerts are considered firing once they have been returned for this long.
+ Alerts which have not yet fired for long enough are considered pending.
displayName: Firing Threshold
path: groups[0].rules[0].for
- description: Labels to add to each alert.
@@ -197,17 +201,21 @@ spec:
- description: MemberList configuration spec
displayName: Memberlist Config
path: hashRing.memberlist
- - description: "EnableIPv6 enables IPv6 support for the memberlist based hash
- ring. \n Currently this also forces the instanceAddrType to podIP to avoid
- local address lookup for the memberlist."
+ - description: |-
+ EnableIPv6 enables IPv6 support for the memberlist based hash ring.
+
+
+ Currently this also forces the instanceAddrType to podIP to avoid local address lookup
+ for the memberlist.
displayName: Enable IPv6
path: hashRing.memberlist.enableIPv6
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: InstanceAddrType defines the type of address to use to advertise
- to the ring. Defaults to the first address from any private network interfaces
- of the current pod. Alternatively the public pod IP can be used in case
- private networks (RFC 1918 and RFC 6598) are not available.
+ - description: |-
+ InstanceAddrType defines the type of address to use to advertise to the ring.
+ Defaults to the first address from any private network interfaces of the current pod.
+ Alternatively the public pod IP can be used in case private networks (RFC 1918 and RFC 6598)
+ are not available.
displayName: Instance Address
path: hashRing.memberlist.instanceAddrType
x-descriptors:
@@ -226,9 +234,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -238,26 +247,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -268,8 +281,9 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.global.ingestion.perStreamDesiredRate
x-descriptors:
@@ -286,84 +300,84 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: IndexedResourceAttributes contains the global configuration for
- resource attributes to store them as index labels.
- displayName: Indexed Resource Attributes
- path: limits.global.otlp.indexedResourceAttributes
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.global.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.global.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.global.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.global.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.global.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.global.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.global.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.global.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.global.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.global.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.global.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.global.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.global.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.global.otlp.scopeAttributes[0].regex
+ path: limits.global.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.global.otlp.structuredMetadata.scopeAttributes[0].regex
- description: CardinalityLimit defines the cardinality limit for index queries.
displayName: Cardinality Limit
path: limits.global.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -381,9 +395,10 @@ spec:
- description: Tenants defines the limits applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set to the set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -393,26 +408,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -423,8 +442,9 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: PerStreamDesiredRate defines the desired ingestion rate per second
- that LokiStack should target applying automatic stream sharding. Units MB.
+ - description: |-
+ PerStreamDesiredRate defines the desired ingestion rate per second that LokiStack should
+ target applying automatic stream sharding. Units MB.
displayName: Per Stream Desired Rate (in MB)
path: limits.tenants.ingestion.perStreamDesiredRate
x-descriptors:
@@ -441,61 +461,62 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: LogAttributes contains the configuration for log attributes to
- store them as structured metadata or drop them altogether.
+ - description: StreamLabels configures which resource attributes are converted
+ to Loki stream labels.
+ displayName: Stream Labels
+ path: limits.tenants.otlp.streamLabels
+ - description: ResourceAttributes lists the names of the resource attributes
+ that should be converted into Loki stream labels.
+ displayName: Resource Attributes
+ path: limits.tenants.otlp.streamLabels.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.streamLabels.resourceAttributes[0].regex
+ - description: StructuredMetadata configures which attributes are saved in structured
+ metadata.
+ displayName: Structured Metadata
+ path: limits.tenants.otlp.structuredMetadata
+ - description: LogAttributes lists the names of log attributes that should be
+ included in structured metadata.
displayName: Log Attributes
- path: limits.tenants.otlp.logAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.logAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.logAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.logAttributes[0].regex
- - description: ResourceAttributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.logAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.logAttributes[0].regex
+ - description: ResourceAttributes lists the names of resource attributes that
+ should be included in structured metadata.
displayName: Resource Attributes
- path: limits.tenants.otlp.resourceAttributes
- - description: Attributes contains the configuration for resource attributes
- to store them as index labels or structured metadata or drop them altogether.
- displayName: Attributes
- path: limits.tenants.otlp.resourceAttributes.attributes
- - description: Action defines the indexing action for the selected resoure attributes.
- They can be either indexed as labels, added to structured metadata or drop
- altogether.
- displayName: Action
- path: limits.tenants.otlp.resourceAttributes.attributes[0].action
- - description: Attributes is the list of attributes to configure indexing or
- drop them altogether.
- displayName: Attribute Names
- path: limits.tenants.otlp.resourceAttributes.attributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.resourceAttributes.attributes[0].regex
- - description: "IgnoreDefaults controls whether to ignore the global configuration
- for resource attributes indexed as labels. \n If IgnoreDefaults is true,
- then this spec needs to contain at least one mapping to a index label."
- displayName: Ignore Global Defaults
- path: limits.tenants.otlp.resourceAttributes.ignoreDefaults
- x-descriptors:
- - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: ScopeAttributes contains the configuration for scope attributes
- to store them as structured metadata or drop them altogether.
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.resourceAttributes[0].regex
+ - description: ScopeAttributes lists the names of scope attributes that should
+ be included in structured metadata.
displayName: Scope Attributes
- path: limits.tenants.otlp.scopeAttributes
- - description: Action defines the indexing action for the selected attributes.
- They can be either added to structured metadata or drop altogether.
- displayName: Action
- path: limits.tenants.otlp.scopeAttributes[0].action
- - description: Attributes allows choosing the attributes by listing their names.
- displayName: Attribute Names
- path: limits.tenants.otlp.scopeAttributes[0].attributes
- - description: Regex allows choosing the attributes by matching a regular expression.
- displayName: Regular Expression
- path: limits.tenants.otlp.scopeAttributes[0].regex
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes
+ - description: Name contains either a verbatim name of an attribute or a regular
+ expression matching many attributes.
+ displayName: Name
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].name
+ - description: If Regex is true, then Name is treated as a regular expression
+ instead of as a verbatim attribute name.
+ displayName: Treat name as regular expression
+ path: limits.tenants.otlp.structuredMetadata.scopeAttributes[0].regex
- description: Blocked defines the list of rules to block matching queries.
displayName: Blocked
path: limits.tenants.queries.blocked
@@ -522,20 +543,23 @@ spec:
path: limits.tenants.queries.cardinalityLimit
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
@@ -550,8 +574,9 @@ spec:
of a query request.
displayName: Query Timeout
path: limits.tenants.queries.queryTimeout
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -578,9 +603,9 @@ spec:
path: replication.factor
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: 'Zones defines an array of ZoneSpec that the scheduler will try
- to satisfy. IMPORTANT: Make sure that the replication factor defined is
- less than or equal to the number of available zones.'
+ - description: |-
+ Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+ IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.
displayName: Zones Spec
path: replication.zones
- description: MaxSkew describes the maximum degree to which Pods can be unevenly
@@ -593,9 +618,9 @@ spec:
labels.
displayName: Topology Key
path: replication.zones[0].topologyKey
- - description: 'Deprecated: Please use replication.factor instead. This field
- will be removed in future versions of this CRD. ReplicationFactor defines
- the policy for log stream replication.'
+ - description: |-
+ Deprecated: Please use replication.factor instead. This field will be removed in future versions of this CRD.
+ ReplicationFactor defines the policy for log stream replication.
displayName: Replication Factor
path: replicationFactor
x-descriptors:
@@ -610,11 +635,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -654,13 +681,15 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: storage.tls.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -680,8 +709,9 @@ spec:
- description: Compactor defines the compaction component spec.
displayName: Compactor pods
path: template.compactor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.compactor.podAntiAffinity
x-descriptors:
@@ -694,8 +724,9 @@ spec:
- description: Distributor defines the distributor component spec.
displayName: Distributor pods
path: template.distributor
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.distributor.podAntiAffinity
x-descriptors:
@@ -708,8 +739,9 @@ spec:
- description: Gateway defines the lokistack gateway component spec.
displayName: Gateway pods
path: template.gateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.gateway.podAntiAffinity
x-descriptors:
@@ -722,8 +754,9 @@ spec:
- description: IndexGateway defines the index gateway component spec.
displayName: Index Gateway pods
path: template.indexGateway
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.indexGateway.podAntiAffinity
x-descriptors:
@@ -736,8 +769,9 @@ spec:
- description: Ingester defines the ingester component spec.
displayName: Ingester pods
path: template.ingester
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ingester.podAntiAffinity
x-descriptors:
@@ -750,8 +784,9 @@ spec:
- description: Querier defines the querier component spec.
displayName: Querier pods
path: template.querier
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.querier.podAntiAffinity
x-descriptors:
@@ -764,8 +799,9 @@ spec:
- description: QueryFrontend defines the query frontend component spec.
displayName: Query Frontend pods
path: template.queryFrontend
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.queryFrontend.podAntiAffinity
x-descriptors:
@@ -778,8 +814,9 @@ spec:
- description: Ruler defines the ruler component spec.
displayName: Ruler pods
path: template.ruler
- - description: PodAntiAffinity defines the pod anti affinity scheduling rules
- to schedule pods of a component.
+ - description: |-
+ PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+ of a component.
displayName: PodAntiAffinity
path: template.ruler.podAntiAffinity
x-descriptors:
@@ -803,13 +840,15 @@ spec:
- description: CA defines the spec for the custom CA for tenant's authentication.
displayName: CA ConfigMap
path: tenants.authentication[0].mTLS.ca
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].mTLS.ca.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].mTLS.ca.caName
x-descriptors:
@@ -820,13 +859,15 @@ spec:
- description: IssuerCA defines the spec for the issuer CA for tenant's authentication.
displayName: IssuerCA ConfigMap
path: tenants.authentication[0].oidc.issuerCA
- - description: Key is the data key of a ConfigMap containing a CA certificate.
- It needs to be in the same namespace as the LokiStack custom resource. If
- empty, it defaults to "service-ca.crt".
+ - description: |-
+ Key is the data key of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
+ If empty, it defaults to "service-ca.crt".
displayName: CA ConfigMap Key
path: tenants.authentication[0].oidc.issuerCA.caKey
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: tenants.authentication[0].oidc.issuerCA.caName
x-descriptors:
@@ -882,12 +923,39 @@ spec:
- description: Openshift defines the configuration specific to Openshift modes.
displayName: Openshift
path: tenants.openshift
- - description: "AdminGroups defines a list of groups, whose members are considered
- to have admin-privileges by the Loki Operator. Setting this to an empty
- array disables admin groups. \n By default the following groups are considered
- admin-groups: - system:cluster-admins - cluster-admin - dedicated-admin"
+ - description: |-
+ AdminGroups defines a list of groups, whose members are considered to have admin-privileges by the Loki Operator.
+ Setting this to an empty array disables admin groups.
+
+
+ By default the following groups are considered admin-groups:
+ - system:cluster-admins
+ - cluster-admin
+ - dedicated-admin
displayName: Admin Groups
path: tenants.openshift.adminGroups
+ - description: OTLP contains settings for ingesting data using OTLP in the OpenShift
+ tenancy mode.
+ displayName: OpenTelemetry Protocol
+ path: tenants.openshift.otlp
+ - description: |-
+ DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+ metadata.
+
+
+ Enabling this setting removes the "recommended attributes" from the generated Loki configuration. This will cause
+ meta information to not be available as stream labels or structured metadata, potentially making queries more
+ expensive and less performant.
+
+
+ Note that there is a set of "required attributes", needed for OpenShift Logging to work properly. Those will be
+ added to the configuration, even if this field is set to true.
+
+
+ This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+ usecase.
+ displayName: Disable recommended OTLP attributes
+ path: tenants.openshift.otlp.disableRecommendedAttributes
statusDescriptors:
- description: Distributor is a map to the per pod status of the distributor
deployment
@@ -981,9 +1049,10 @@ spec:
- description: Global defines the limits applied globally across the cluster.
displayName: Global Limits
path: limits.global
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.global.ingestion.ingestionBurstSize
x-descriptors:
@@ -993,26 +1062,30 @@ spec:
path: limits.global.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.global.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.global.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.global.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.global.ingestion.maxLabelValueLength
x-descriptors:
@@ -1023,20 +1096,23 @@ spec:
path: limits.global.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.global.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.global.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.global.queries.maxQuerySeries
x-descriptors:
@@ -1044,9 +1120,10 @@ spec:
- description: Tenants defines the limits and overrides applied per tenant.
displayName: Limits per Tenant
path: limits.tenants
- - description: IngestionBurstSize defines the local rate-limited sample size
- per distributor replica. It should be set to the set at least to the maximum
- logs size expected in a single push request.
+ - description: |-
+ IngestionBurstSize defines the local rate-limited sample size per
+ distributor replica. It should be set at least to the
+ maximum logs size expected in a single push request.
displayName: Ingestion Burst Size (in MB)
path: limits.tenants.ingestion.ingestionBurstSize
x-descriptors:
@@ -1056,26 +1133,30 @@ spec:
path: limits.tenants.ingestion.ingestionRate
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxGlobalStreamsPerTenant defines the maximum number of active
- streams per tenant, across the cluster.
+ - description: |-
+ MaxGlobalStreamsPerTenant defines the maximum number of active streams
+ per tenant, across the cluster.
displayName: Max Global Streams per Tenant
path: limits.tenants.ingestion.maxGlobalStreamsPerTenant
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNameLength defines the maximum number of characters allowed
+ - description: |-
+ MaxLabelNameLength defines the maximum number of characters allowed
for label keys in log streams.
displayName: Max Label Name Length
path: limits.tenants.ingestion.maxLabelNameLength
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelNamesPerSeries defines the maximum number of label names
- per series in each log stream.
+ - description: |-
+ MaxLabelNamesPerSeries defines the maximum number of label names per series
+ in each log stream.
displayName: Max Labels Names per Series
path: limits.tenants.ingestion.maxLabelNamesPerSeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxLabelValueLength defines the maximum number of characters
- allowed for label values in log streams.
+ - description: |-
+ MaxLabelValueLength defines the maximum number of characters allowed
+ for label values in log streams.
displayName: Max Label Value Length
path: limits.tenants.ingestion.maxLabelValueLength
x-descriptors:
@@ -1086,26 +1167,30 @@ spec:
path: limits.tenants.ingestion.maxLineSize
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxChunksPerQuery defines the maximum number of chunks that can
- be fetched by a single query.
+ - description: |-
+ MaxChunksPerQuery defines the maximum number of chunks
+ that can be fetched by a single query.
displayName: Max Chunk per Query
path: limits.tenants.queries.maxChunksPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries
+ - description: |-
+ MaxEntriesLimitsPerQuery defines the maximum number of log entries
that will be returned for a query.
displayName: Max Entries Limit per Query
path: limits.tenants.queries.maxEntriesLimitPerQuery
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: MaxQuerySeries defines the maximum of unique series that is returned
- by a metric query.
+ - description: |-
+ MaxQuerySeries defines the maximum of unique series
+ that is returned by a metric query.
displayName: Max Query Series
path: limits.tenants.queries.maxQuerySeries
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: ManagementState defines if the CR should be managed by the operator
- or not. Default is managed.
+ - description: |-
+ ManagementState defines if the CR should be managed by the operator or not.
+ Default is managed.
displayName: Management State
path: managementState
x-descriptors:
@@ -1126,11 +1211,13 @@ spec:
path: rules.enabled
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: Namespaces to be selected for PrometheusRules discovery. If unspecified,
- only the same namespace as the LokiStack object is in is used.
+ - description: |-
+ Namespaces to be selected for PrometheusRules discovery. If unspecified, only
+ the same namespace as the LokiStack object is in is used.
displayName: Namespace Selector
path: rules.namespaceSelector
- - description: A selector to select which LokiRules to mount for loading alerting/recording
+ - description: |-
+ A selector to select which LokiRules to mount for loading alerting/recording
rules from.
displayName: Selector
path: rules.selector
@@ -1168,8 +1255,9 @@ spec:
- description: TLS configuration for reaching the object storage endpoint.
displayName: TLS Config
path: storage.tls
- - description: CA is the name of a ConfigMap containing a CA certificate. It
- needs to be in the same namespace as the LokiStack custom resource.
+ - description: |-
+ CA is the name of a ConfigMap containing a CA certificate.
+ It needs to be in the same namespace as the LokiStack custom resource.
displayName: CA ConfigMap Name
path: storage.tls.caName
x-descriptors:
@@ -1371,8 +1459,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1388,9 +1477,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: Labels to add to each recording rule.
@@ -1422,8 +1512,9 @@ spec:
- description: List of groups for recording rules.
displayName: Groups
path: groups
- - description: Interval defines the time interval between evaluation of the
- given recoding rule.
+ - description: |-
+ Interval defines the time interval between evaluation of the given
+ recording rule.
displayName: Evaluation Interval
path: groups[0].interval
- description: Limit defines the number of series a recording rule can produce.
@@ -1439,9 +1530,10 @@ spec:
- description: Rules defines a list of recording rules
displayName: Rules
path: groups[0].rules
- - description: The LogQL expression to evaluate. Every evaluation cycle this
- is evaluated at the current time, and all resultant time series become pending/firing
- alerts.
+ - description: |-
+ The LogQL expression to evaluate. Every evaluation cycle this is
+ evaluated at the current time, and all resultant time series become
+ pending/firing alerts.
displayName: LogQL Expression
path: groups[0].rules[0].expr
- description: The name of the time series to output to. Must be a valid metric
@@ -1541,9 +1633,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1563,9 +1656,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1598,21 +1691,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -1693,9 +1788,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -1715,9 +1811,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -1750,21 +1846,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -1826,21 +1924,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
@@ -1980,9 +2080,10 @@ spec:
path: alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -2002,9 +2103,9 @@ spec:
path: alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2037,21 +2138,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: alertmanager.relabelConfigs[0].targetLabel
@@ -2129,9 +2232,10 @@ spec:
path: overrides.alertmanager.enableV2
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:booleanSwitch
- - description: List of AlertManager URLs to send notifications to. Each Alertmanager
- URL is treated as a separate group in the configuration. Multiple Alertmanagers
- in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery).
+ - description: |-
+ List of AlertManager URLs to send notifications to. Each Alertmanager URL is treated as
+ a separate group in the configuration. Multiple Alertmanagers in HA per group can be
+ supported by using DNS resolution (See EnableDNSDiscovery).
displayName: AlertManager Endpoints
path: overrides.alertmanager.endpoints
- description: Additional labels to add to all alerts.
@@ -2151,9 +2255,9 @@ spec:
path: overrides.alertmanager.notificationQueue.capacity
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
- - description: Minimum duration between alert and restored "for" state. This
- is maintained only for alerts with configured "for" time greater than the
- grace period.
+ - description: |-
+ Minimum duration between alert and restored "for" state. This is maintained
+ only for alerts with configured "for" time greater than the grace period.
displayName: Firing Grace Period
path: overrides.alertmanager.notificationQueue.forGracePeriod
- description: Max time to tolerate outage for restoring "for" state of alert.
@@ -2186,21 +2290,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: overrides.alertmanager.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: overrides.alertmanager.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: overrides.alertmanager.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: overrides.alertmanager.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: overrides.alertmanager.relabelConfigs[0].targetLabel
@@ -2262,21 +2368,23 @@ spec:
Default is '(.*)'
displayName: Regex
path: remoteWrite.client.relabelConfigs[0].regex
- - description: Replacement value against which a regex replace is performed
- if the regular expression matches. Regex capture groups are available. Default
- is '$1'
+ - description: |-
+ Replacement value against which a regex replace is performed if the
+ regular expression matches. Regex capture groups are available. Default is '$1'
displayName: Replacement
path: remoteWrite.client.relabelConfigs[0].replacement
- description: Separator placed between concatenated source label values. default
is ';'.
displayName: Separator
path: remoteWrite.client.relabelConfigs[0].separator
- - description: The source labels select values from existing labels. Their content
- is concatenated using the configured separator and matched against the configured
- regular expression for the replace, keep, and drop actions.
+ - description: |-
+ The source labels select values from existing labels. Their content is concatenated
+ using the configured separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
displayName: Source Labels
path: remoteWrite.client.relabelConfigs[0].sourceLabels
- - description: Label to which the resulting value is written in a replace action.
+ - description: |-
+ Label to which the resulting value is written in a replace action.
It is mandatory for replace actions. Regex capture groups are available.
displayName: Target Label
path: remoteWrite.client.relabelConfigs[0].targetLabel
diff --git a/operator/config/overlays/community-openshift/kustomization.yaml b/operator/config/overlays/community-openshift/kustomization.yaml
index af5e40aac80b8..d5740c4a5d2b1 100644
--- a/operator/config/overlays/community-openshift/kustomization.yaml
+++ b/operator/config/overlays/community-openshift/kustomization.yaml
@@ -1,3 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
resources:
- ./../openshift
@@ -5,26 +8,27 @@ resources:
namespace: kubernetes-operators
labels:
-- pairs:
+- includeSelectors: true
+ pairs:
+ app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/managed-by: operator-lifecycle-manager
- includeSelectors: true
- pairs:
app.kubernetes.io/instance: loki-operator-v0.6.2
- app.kubernetes.io/version: "0.6.2"
+ app.kubernetes.io/version: 0.6.2
configMapGenerator:
-- files:
+- behavior: replace
+ files:
- controller_manager_config.yaml
name: manager-config
- behavior: replace
-patchesStrategicMerge:
-- manager_related_image_patch.yaml
-- prometheus_service_monitor_patch.yaml
images:
- name: controller
newName: docker.io/grafana/loki-operator
newTag: 0.6.2
+
+patches:
+- path: manager_related_image_patch.yaml
+- path: prometheus_service_monitor_patch.yaml
diff --git a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
index 538e6658383b3..126a4f29c9de7 100644
--- a/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community-openshift/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.2.0
+ value: docker.io/grafana/loki:3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/overlays/community/kustomization.yaml b/operator/config/overlays/community/kustomization.yaml
index ed910555da2c5..8deef07f26d50 100644
--- a/operator/config/overlays/community/kustomization.yaml
+++ b/operator/config/overlays/community/kustomization.yaml
@@ -1,3 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
resources:
- ../../crd
- ../../rbac
@@ -16,14 +19,14 @@ namespace: loki-operator
namePrefix: loki-operator-
labels:
-- pairs:
+- includeSelectors: true
+ pairs:
+ app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: loki-operator
- app.kubernetes.io/managed-by: operator-lifecycle-manager
- includeSelectors: true
- pairs:
app.kubernetes.io/instance: loki-operator-v0.6.2
- app.kubernetes.io/version: "0.6.2"
+ app.kubernetes.io/version: 0.6.2
generatorOptions:
disableNameSuffixHash: true
@@ -33,12 +36,6 @@ configMapGenerator:
- controller_manager_config.yaml
name: manager-config
-patchesStrategicMerge:
-- manager_auth_proxy_patch.yaml
-- manager_related_image_patch.yaml
-- manager_run_flags_patch.yaml
-- manager_webhook_patch.yaml
-- webhookcainjection_patch.yaml
images:
- name: controller
@@ -46,31 +43,109 @@ images:
newTag: 0.6.2
# the following config is for teaching kustomize how to do var substitution
-vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
-- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
- objref:
- kind: Certificate
- group: cert-manager.io
- version: v1
- name: serving-cert # this name should match the one in certificate.yaml
- fieldref:
- fieldpath: metadata.namespace
-- name: CERTIFICATE_NAME
- objref:
- kind: Certificate
- group: cert-manager.io
- version: v1
- name: serving-cert # this name should match the one in certificate.yaml
-- name: SERVICE_NAMESPACE # namespace of the service
- objref:
- kind: Service
- version: v1
- name: webhook-service
- fieldref:
- fieldpath: metadata.namespace
-- name: SERVICE_NAME
- objref:
- kind: Service
- version: v1
- name: webhook-service
+patches:
+- path: manager_auth_proxy_patch.yaml
+- path: manager_related_image_patch.yaml
+- path: manager_run_flags_patch.yaml
+- path: manager_webhook_patch.yaml
+- path: webhookcainjection_patch.yaml
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
+# Uncomment the following replacements to add the cert-manager CA injection annotations
+replacements:
+ - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldPath: .metadata.namespace # namespace of the certificate CR
+ targets:
+ - select:
+ kind: ValidatingWebhookConfiguration
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 0
+ create: true
+ - select:
+ kind: MutatingWebhookConfiguration
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 0
+ create: true
+ - select:
+ kind: CustomResourceDefinition
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 0
+ create: true
+ - source:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ name: serving-cert # this name should match the one in certificate.yaml
+ fieldPath: .metadata.name
+ targets:
+ - select:
+ kind: ValidatingWebhookConfiguration
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 1
+ create: true
+ - select:
+ kind: MutatingWebhookConfiguration
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 1
+ create: true
+ - select:
+ kind: CustomResourceDefinition
+ fieldPaths:
+ - .metadata.annotations.[cert-manager.io/inject-ca-from]
+ options:
+ delimiter: '/'
+ index: 1
+ create: true
+ - source: # Add cert-manager annotation to the webhook Service
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldPath: .metadata.name # namespace of the service
+ targets:
+ - select:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ fieldPaths:
+ - .spec.dnsNames.0
+ - .spec.dnsNames.1
+ options:
+ delimiter: '.'
+ index: 0
+ create: true
+ - source:
+ kind: Service
+ version: v1
+ name: webhook-service
+ fieldPath: .metadata.namespace # namespace of the service
+ targets:
+ - select:
+ kind: Certificate
+ group: cert-manager.io
+ version: v1
+ fieldPaths:
+ - .spec.dnsNames.0
+ - .spec.dnsNames.1
+ options:
+ delimiter: '.'
+ index: 1
+ create: true
diff --git a/operator/config/overlays/community/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml
index 538e6658383b3..126a4f29c9de7 100644
--- a/operator/config/overlays/community/manager_related_image_patch.yaml
+++ b/operator/config/overlays/community/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.2.0
+ value: docker.io/grafana/loki:3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/overlays/community/webhookcainjection_patch.yaml b/operator/config/overlays/community/webhookcainjection_patch.yaml
index cbcbf762a647b..40c40fe3ced15 100644
--- a/operator/config/overlays/community/webhookcainjection_patch.yaml
+++ b/operator/config/overlays/community/webhookcainjection_patch.yaml
@@ -1,5 +1,5 @@
# This patch add annotation to admission webhook config and
-# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
+# the variables CERTIFICATE_NAMESPACE_PLACEHOLDER and CERTIFICATE_NAME_PLACEHOLDER will be substituted by kustomize.
#
# [WEBHOOK] To enable mutating webhook hook, uncomment the following section
#
@@ -8,11 +8,11 @@
# metadata:
# name: mutating-webhook-configuration
# annotations:
-# cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+# cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE_PLACEHOLDER/CERTIFICATE_NAME_PLACEHOLDER
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook-configuration
annotations:
- cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE_PLACEHOLDER/CERTIFICATE_NAME_PLACEHOLDER
diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml
index e4af25f50e449..91f6016599415 100644
--- a/operator/config/overlays/development/manager_related_image_patch.yaml
+++ b/operator/config/overlays/development/manager_related_image_patch.yaml
@@ -9,6 +9,6 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:3.2.0
+ value: docker.io/grafana/loki:3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
diff --git a/operator/config/overlays/openshift/auth_proxy_service_annotations_patch.yaml b/operator/config/overlays/openshift/auth_proxy_service_annotations_patch.yaml
index a24f8b83bc8ca..a2b8d2e84c4bb 100644
--- a/operator/config/overlays/openshift/auth_proxy_service_annotations_patch.yaml
+++ b/operator/config/overlays/openshift/auth_proxy_service_annotations_patch.yaml
@@ -3,5 +3,4 @@ kind: Service
metadata:
annotations:
service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics
- labels:
name: controller-manager-metrics-service
diff --git a/operator/config/overlays/openshift/kustomization.yaml b/operator/config/overlays/openshift/kustomization.yaml
index cdd65f1cbeafc..29c420ed2f43a 100644
--- a/operator/config/overlays/openshift/kustomization.yaml
+++ b/operator/config/overlays/openshift/kustomization.yaml
@@ -1,3 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
resources:
- ../../crd
- ../../rbac
@@ -17,14 +20,14 @@ namespace: openshift-operators-redhat
namePrefix: loki-operator-
labels:
-- pairs:
+- includeSelectors: true
+ pairs:
+ app.kubernetes.io/managed-by: operator-lifecycle-manager
app.kubernetes.io/name: loki-operator
app.kubernetes.io/part-of: cluster-logging
- app.kubernetes.io/managed-by: operator-lifecycle-manager
- includeSelectors: true
- pairs:
app.kubernetes.io/instance: loki-operator-0.1.0
- app.kubernetes.io/version: "0.1.0"
+ app.kubernetes.io/version: 0.1.0
generatorOptions:
disableNameSuffixHash: true
@@ -34,16 +37,17 @@ configMapGenerator:
- controller_manager_config.yaml
name: manager-config
-patchesStrategicMerge:
-- auth_proxy_service_annotations_patch.yaml
-- manager_auth_proxy_patch.yaml
-- manager_related_image_patch.yaml
-- manager_run_flags_patch.yaml
-- manager_security_context_patch.yaml
-- manager_webhook_patch.yaml
-- prometheus_service_monitor_patch.yaml
images:
- name: controller
newName: quay.io/openshift-logging/loki-operator
newTag: 0.1.0
+
+patches:
+- path: auth_proxy_service_annotations_patch.yaml
+- path: manager_auth_proxy_patch.yaml
+- path: manager_related_image_patch.yaml
+- path: manager_run_flags_patch.yaml
+- path: manager_security_context_patch.yaml
+- path: manager_webhook_patch.yaml
+- path: prometheus_service_monitor_patch.yaml
diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml
index f7ae10c2aaf3c..1c11739871c61 100644
--- a/operator/config/overlays/openshift/manager_related_image_patch.yaml
+++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml
@@ -9,7 +9,7 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: quay.io/openshift-logging/loki:v3.2.0
+ value: quay.io/openshift-logging/loki:v3.2.1
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
diff --git a/operator/config/scorecard/kustomization.yaml b/operator/config/scorecard/kustomization.yaml
index d73509ee73fa9..814b71282d560 100644
--- a/operator/config/scorecard/kustomization.yaml
+++ b/operator/config/scorecard/kustomization.yaml
@@ -1,16 +1,19 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
resources:
- bases/config.yaml
-patchesJson6902:
+
+patches:
- path: patches/basic.config.yaml
target:
group: scorecard.operatorframework.io
- version: v1alpha3
kind: Configuration
name: config
+ version: v1alpha3
- path: patches/olm.config.yaml
target:
group: scorecard.operatorframework.io
- version: v1alpha3
kind: Configuration
name: config
-# +kubebuilder:scaffold:patchesJson6902
+ version: v1alpha3
diff --git a/operator/config/webhook/kustomizeconfig.yaml b/operator/config/webhook/kustomizeconfig.yaml
index 25e21e3c963f0..30293f0f5d90f 100644
--- a/operator/config/webhook/kustomizeconfig.yaml
+++ b/operator/config/webhook/kustomizeconfig.yaml
@@ -20,6 +20,3 @@ namespace:
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
-
-varReference:
-- path: metadata/annotations
diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md
index 54c105bf143db..7816be60e804c 100644
--- a/operator/docs/operator/api.md
+++ b/operator/docs/operator/api.md
@@ -1007,7 +1007,7 @@ BlockedQueryTypes
## BlockedQueryTypes { #loki-grafana-com-v1-BlockedQueryTypes }
-([]github.com/grafana/loki/operator/apis/loki/v1.BlockedQueryType
alias)
+([]github.com/grafana/loki/operator/api/loki/v1.BlockedQueryType
alias)
(Appears on: BlockedQuerySpec )
@@ -1146,51 +1146,6 @@ a secret. This mode is only supported for certain object storage types in certai
-## GlobalOTLPSpec { #loki-grafana-com-v1-GlobalOTLPSpec }
-
-(Appears on: LimitsTemplateSpec )
-
-
-
GlobalOTLPSpec defines which resource, scope and log attributes to
-be stored as index or structured metadata or drop altogether for all
-tenants.
-
-
-
-
-Field
-Description
-
-
-
-
-
-indexedResourceAttributes
-
-[]string
-
-
-
-(Optional)
-IndexedResourceAttributes contains the global configuration for resource attributes
-to store them as index labels.
-
-
-
-
-OTLPSpec
-
-
-OTLPSpec
-
-
-
-
-
-
-
-
-
## HashRingSpec { #loki-grafana-com-v1-HashRingSpec }
(Appears on: LokiStackSpec )
@@ -1461,7 +1416,7 @@ LimitsTemplateSpec
tenants
-map[string]github.com/grafana/loki/operator/apis/loki/v1.PerTenantLimitsTemplateSpec
+map[string]github.com/grafana/loki/operator/api/loki/v1.PerTenantLimitsTemplateSpec
@@ -1520,16 +1475,16 @@ QueryLimitSpec
otlp
-
-GlobalOTLPSpec
+
+OTLPSpec
(Optional)
-OTLP to configure which resource, scope and log attributes
-to store as labels or structured metadata or drop them altogether
-for all tenants.
+OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+enforce the use of some required attributes.
@@ -1961,9 +1916,9 @@ DO NOT USE THIS IN PRODUCTION!
"1x.extra-small"
SizeOneXExtraSmall defines the size of a single Loki deployment
-with extra small resources/limits requirements and without HA support.
-This size is ultimately dedicated for development and demo purposes.
-DO NOT USE THIS IN PRODUCTION!
+with extra small resources/limits requirements and HA support for all
+Loki components. This size is dedicated for setup without the
+requirement for single replication factor and auto-compaction.
FIXME: Add clear description of ingestion/query performance expectations.
"1x.medium"
@@ -1973,6 +1928,13 @@ Loki components. This size is dedicated for setup with the
requirement for single replication factor and auto-compaction.
FIXME: Add clear description of ingestion/query performance expectations.
+"1x.pico"
+SizeOneXPico defines the size of a single Loki deployment
+with extra small resources/limits requirements and HA support for all
+Loki components. This size is dedicated for setup without the
+requirement for single replication factor and auto-compaction.
+FIXME: Add clear description of ingestion/query performance expectations.
+
"1x.small"
SizeOneXSmall defines the size of a single Loki deployment
with small resources/limits requirements and HA support for all
@@ -2663,42 +2625,11 @@ string
-## OTLPAttributeAction { #loki-grafana-com-v1-OTLPAttributeAction }
-(string
alias)
-
-(Appears on: OTLPAttributesSpec , OTLPResourceAttributesConfigSpec )
-
-
-
OTLPAttributeAction defines the action to executed when indexing
-OTLP resource attributes. Resource attributes can be either added
-to the index, the chunk structured metadata or entirely dropped.
-
-
-
-
-Value
-Description
-
-
-"drop"
-OTLPAttributeActionDrop removes the matching attributes from the log entry.
-
-"indexLabel"
-OTLPAttributeActionIndexLabel stores a resource attribute as a label, which is part of the index identifying streams.
-
-"structuredMetadata"
-OTLPAttributeActionStructuredMetadata stores an attribute as structured metadata with each log entry.
-
-
-
-
-## OTLPAttributesSpec { #loki-grafana-com-v1-OTLPAttributesSpec }
+## OTLPAttributeReference { #loki-grafana-com-v1-OTLPAttributeReference }
-(Appears on: OTLPSpec )
+(Appears on: OTLPMetadataSpec , OTLPStreamLabelSpec )
-
OTLPAttributesSpec contains the configuration for a set of attributes
-to store them as index labels or structured metadata or drop them altogether.
@@ -2710,52 +2641,35 @@ to store them as index labels or structured metadata or drop them altogether.
-action
-
-
-OTLPAttributeAction
-
-
-
-
-Action defines the indexing action for the selected attributes. They
-can be either added to structured metadata or drop altogether.
-
-
-
-
-attributes
+name
-[]string
+string
-(Optional)
-Attributes allows choosing the attributes by listing their names.
+Name contains either a verbatim name of an attribute or a regular expression matching many attributes.
regex
-string
+bool
(Optional)
-Regex allows choosing the attributes by matching a regular expression.
+If Regex is true, then Name is treated as a regular expression instead of as a verbatim attribute name.
-## OTLPResourceAttributesConfigSpec { #loki-grafana-com-v1-OTLPResourceAttributesConfigSpec }
+## OTLPMetadataSpec { #loki-grafana-com-v1-OTLPMetadataSpec }
-(Appears on: OTLPResourceAttributesSpec )
+(Appears on: OTLPSpec )
-
OTLPResourceAttributesConfigSpec contains the configuration for a set of resource attributes
-to store them as index labels or structured metadata or drop them altogether.
@@ -2767,53 +2681,56 @@ to store them as index labels or structured metadata or drop them altogether.
-action
+resourceAttributes
-
-OTLPAttributeAction
+
+[]OTLPAttributeReference
-Action defines the indexing action for the selected resoure attributes. They
-can be either indexed as labels, added to structured metadata or drop altogether.
+(Optional)
+ResourceAttributes lists the names of resource attributes that should be included in structured metadata.
-attributes
+scopeAttributes
-[]string
+
+[]OTLPAttributeReference
+
(Optional)
-Attributes is the list of attributes to configure indexing or drop them
-altogether.
+ScopeAttributes lists the names of scope attributes that should be included in structured metadata.
-regex
+logAttributes
-string
+
+[]OTLPAttributeReference
+
(Optional)
-Regex allows choosing the attributes by matching a regular expression.
+LogAttributes lists the names of log attributes that should be included in structured metadata.
-## OTLPResourceAttributesSpec { #loki-grafana-com-v1-OTLPResourceAttributesSpec }
+## OTLPSpec { #loki-grafana-com-v1-OTLPSpec }
-(Appears on: OTLPSpec )
+(Appears on: LimitsTemplateSpec , PerTenantLimitsTemplateSpec )
-
OTLPResourceAttributesSpec contains the configuration for resource attributes
-to store them as index labels or structured metadata or drop them altogether.
+
OTLPSpec defines which resource, scope and log attributes should be used as stream labels or
+stored as structured metadata.
@@ -2825,43 +2742,40 @@ to store them as index labels or structured metadata or drop them altogether.
-ignoreDefaults
+streamLabels
-bool
+
+OTLPStreamLabelSpec
+
(Optional)
-IgnoreDefaults controls whether to ignore the global configuration for resource attributes
-indexed as labels.
-If IgnoreDefaults is true, then this spec needs to contain at least one mapping to a index label.
+StreamLabels configures which resource attributes are converted to Loki stream labels.
-attributes
+structuredMetadata
-
-[]OTLPResourceAttributesConfigSpec
+
+OTLPMetadataSpec
(Optional)
-Attributes contains the configuration for resource attributes
-to store them as index labels or structured metadata or drop them altogether.
+StructuredMetadata configures which attributes are saved in structured metadata.
-## OTLPSpec { #loki-grafana-com-v1-OTLPSpec }
+## OTLPStreamLabelSpec { #loki-grafana-com-v1-OTLPStreamLabelSpec }
-(Appears on: GlobalOTLPSpec , PerTenantLimitsTemplateSpec )
+(Appears on: OTLPSpec )
-
OTLPSpec defines which resource, scope and log attributes to
-be stored as index or structured metadata or drop altogether
@@ -2875,45 +2789,14 @@ be stored as index or structured metadata or drop altogether
resourceAttributes
-
-OTLPResourceAttributesSpec
+
+[]OTLPAttributeReference
(Optional)
-ResourceAttributes contains the configuration for resource attributes
-to store them as index labels or structured metadata or drop them altogether.
-
-
-
-
-scopeAttributes
-
-
-[]OTLPAttributesSpec
-
-
-
-
-(Optional)
-ScopeAttributes contains the configuration for scope attributes
-to store them as structured metadata or drop them altogether.
-
-
-
-
-logAttributes
-
-
-[]OTLPAttributesSpec
-
-
-
-
-(Optional)
-LogAttributes contains the configuration for log attributes
-to store them as structured metadata or drop them altogether.
+ResourceAttributes lists the names of the resource attributes that should be converted into Loki stream labels.
@@ -3191,6 +3074,44 @@ It needs to be in the same namespace as the LokiStack custom resource.
+## OpenshiftOTLPConfig { #loki-grafana-com-v1-OpenshiftOTLPConfig }
+
+(Appears on: OpenshiftTenantSpec )
+
+
+
OpenshiftOTLPConfig defines configuration specific to users using OTLP together with an OpenShift tenancy mode.
+
+
+
+
+Field
+Description
+
+
+
+
+
+disableRecommendedAttributes
+
+bool
+
+
+
+(Optional)
+DisableRecommendedAttributes can be used to reduce the number of attributes used for stream labels and structured
+metadata.
+Enabling this setting removes the “recommended attributes” from the generated Loki configuration. This will cause
+meta information to not be available as stream labels or structured metadata, potentially making queries more
+expensive and less performant.
+Note that there is a set of “required attributes”, needed for OpenShift Logging to work properly. Those will be
+added to the configuration, even if this field is set to true.
+This option is supposed to be combined with a custom label configuration customizing the labels for the specific
+usecase.
+
+
+
+
+
## OpenshiftTenantSpec { #loki-grafana-com-v1-OpenshiftTenantSpec }
(Appears on: TenantsSpec )
@@ -3223,6 +3144,20 @@ Setting this to an empty array disables admin groups.
- dedicated-admin
+
+
+otlp
+
+
+OpenshiftOTLPConfig
+
+
+
+
+(Optional)
+OTLP contains settings for ingesting data using OTLP in the OpenShift tenancy mode.
+
+
@@ -3231,7 +3166,7 @@ Setting this to an empty array disables admin groups.
(Appears on: LimitsSpec )
-
LimitsTemplateSpec defines the limits applied at ingestion or query path.
+
PerTenantLimitsTemplateSpec defines the limits applied at ingestion or query path.
@@ -3280,9 +3215,10 @@ OTLPSpec
(Optional)
-OTLP to configure which resource, scope and log attributes
-to store as labels or structured metadata or drop them altogether
-for a single tenants.
+OTLP to configure which resource, scope and log attributes are stored as stream labels or structured metadata.
+Tenancy modes can provide a default OTLP configuration, when no custom OTLP configuration is set or even
+enforce the use of some required attributes.
+The per-tenant configuration for OTLP attributes will be merged with the global configuration.
@@ -3405,7 +3341,7 @@ At least one container is still running or is in the process of being restarted.
## PodStatusMap { #loki-grafana-com-v1-PodStatusMap }
-(map[github.com/grafana/loki/operator/apis/loki/v1.PodStatus][]string
alias)
+(map[github.com/grafana/loki/operator/api/loki/v1.PodStatus][]string
alias)
(Appears on: LokiStackComponentStatus )
@@ -4594,7 +4530,7 @@ RemoteWriteSpec
overrides
-map[string]github.com/grafana/loki/operator/apis/loki/v1.RulerOverrides
+map[string]github.com/grafana/loki/operator/api/loki/v1.RulerOverrides
@@ -5967,7 +5903,7 @@ LimitsTemplateSpec
tenants
-map[string]github.com/grafana/loki/operator/apis/loki/v1beta1.LimitsTemplateSpec
+map[string]github.com/grafana/loki/operator/api/loki/v1beta1.LimitsTemplateSpec
@@ -8188,7 +8124,7 @@ RemoteWriteSpec
overrides
-map[string]github.com/grafana/loki/operator/apis/loki/v1beta1.RulerOverrides
+map[string]github.com/grafana/loki/operator/api/loki/v1beta1.RulerOverrides
diff --git a/operator/docs/operator/compatibility.md b/operator/docs/operator/compatibility.md
index 5f573dc89906e..04e26b50de670 100644
--- a/operator/docs/operator/compatibility.md
+++ b/operator/docs/operator/compatibility.md
@@ -30,3 +30,4 @@ The versions of Loki compatible to be run with the Loki Operator are:
* v3.1.0
* v3.1.1
* v3.2.0
+* v3.2.1
diff --git a/operator/docs/user-guides/open-telemetry.md b/operator/docs/user-guides/open-telemetry.md
new file mode 100644
index 0000000000000..fd09b77bab19e
--- /dev/null
+++ b/operator/docs/user-guides/open-telemetry.md
@@ -0,0 +1,175 @@
+---
+title: "OpenTelemetry / OTLP"
+description: ""
+lead: ""
+date: 2024-10-25T12:43:23+02:00
+lastmod: 2024-10-25T12:43:23+02:00
+draft: false
+images: []
+menu:
+ docs:
+ parent: "user-guides"
+weight: 100
+toc: true
+---
+
+## Introduction
+
+Loki 3.0 introduced an API endpoint using the OpenTelemetry Protocol (OTLP) as a new way of ingesting log entries into Loki. This endpoint is an addition to the standard Push API that was available in Loki from the start.
+
+Because OTLP is not specifically geared towards Loki but is a standard format, it needs additional configuration on Loki's side to map the OpenTelemetry data format to Loki's data model.
+
+Specifically, OTLP has no concept of "stream labels" or "structured metadata". Instead, OTLP provides metadata about a log entry in _attributes_ that are grouped into three buckets (resource, scope and log), which allows setting metadata for many log entries at once or just on a single entry depending on what's needed.
+
+## Prerequisites
+
+Log ingestion using OTLP depends on structured metadata being available in Loki. This capability was introduced with schema version 13, which is available in Loki Operator when using `v13` in a schema configuration entry.
+
+If you are creating a new `LokiStack`, make sure to set `version: v13` in the storage schema configuration.
+
+If there is an existing schema configuration, a new schema version entry needs to be added, so that it becomes active in the future (see [Upgrading Schemas][loki-upgrading-schemas] in the Loki documentation).
+
+```yaml
+# [...]
+spec:
+ storage:
+ schemas:
+ - version: v13
+ effectiveDate: 2024-10-25
+```
+
+Once the `effectiveDate` has passed, your `LokiStack` will be using the new schema configuration and is ready to store structured metadata.
+
+## Attribute Mapping
+
+Loki splits the configuration for mapping OTLP attributes to stream labels and structured metadata into two places:
+
+- `default_resource_attributes_as_index_labels` in the [`distributor` configuration][loki-docs-distributor-config]
+- `otlp_config` in the `limits_config` (see [Loki documentation][loki-docs-limits-config])
+
+By default, `default_resource_attributes_as_index_labels` provides a set of resource-attributes that are mapped to stream-labels on the Loki side.
+
+As the field in the distributor configuration is limited to resource-level attributes and can only produce stream-labels as an output, the `otlp_config` needs to be used to map resource, scope or log level attributes to structured metadata.
+
+The Loki Operator does not use the same approach for configuring the attributes as Loki itself does. The most visible difference is that there is no distinction between the `distributor` and `limits` configuration in the Operator.
+
+Instead, the Loki Operator only uses the `limits` configuration for all its attributes. The structure of the `limits` configuration also differs from the structure in the Loki configuration file. See [Custom Attribute Mapping](#custom-attribute-mapping) below for an explanation of the configuration options available in the Operator.
+
+### Picking Stream Labels and Structured Metadata
+
+Whether you choose to map an attribute to a stream label or to structured metadata depends on what data is present in the attribute. Stream labels are used to identify a set of log entries "belonging together" in a stream of events. These labels are used for indexing and identifying the streams and so should not contain information that changes between different log entries of the same application. They should also not contain values that have a high number of different values ("cardinality").
+
+Structured metadata on the other hand is just saved together with the log entries and only read when querying for logs, so it's more suitable to store "any data". Both stream labels and structured metadata can be used to filter log entries during a query.
+
+**Note:** Attributes that are not mapped to either a stream label or structured metadata will not be stored into Loki.
+
+### Loki Operator Defaults
+
+When using the Loki Operator the default attribute mappings depend on the [tenancy mode]({{< ref "api.md#loki-grafana-com-v1-ModeType" >}}) used for the `LokiStack`:
+
+- `static` and `dynamic` use the Grafana defaults
+- `openshift-logging` uses OpenShift defaults
+
+### Custom Attribute Mapping
+
+All tenancy modes support customization of the attribute mapping configuration. This can be done globally (for all tenants) or on a per-tenant basis. When a custom attribute mapping configuration is defined, then the Grafana defaults are not used. If the default labels are desired as well, they need to be added to the custom configuration. See also the section about [Customizing OpenShift Defaults](#customizing-openshift-defaults) below.
+
+**Note:** A major difference between the Operator and Loki is how it handles inheritance. Loki by default only copies the attributes defined in the `default_resource_attributes_as_index_labels` setting to the tenants, whereas the Operator will copy all global configuration into every tenant.
+
+The attribute mapping configuration in `LokiStack` is done through the limits configuration:
+
+```yaml
+# [...]
+spec:
+ limits:
+ global:
+ otlp: {} # Global OTLP Attribute Configuration
+ tenants:
+ example-tenant:
+ otlp: {} # OTLP Attribute Configuration for tenant "example-tenant"
+```
+
+Both global and per-tenant OTLP configurations can map attributes to stream-labels or structured-metadata. At least _one stream-label_ is needed for successfully saving a log entry to Loki storage, so the configuration should account for that.
+
+Stream labels can only be generated from resource-level attributes, which is mirrored in the data structure of the `LokiStack` resource:
+
+```yaml
+# [...]
+spec:
+ limits:
+ global:
+ otlp:
+ streamLabels:
+ resourceAttributes:
+ - name: "k8s.namespace.name"
+ - name: "k8s.pod.name"
+ - name: "k8s.container.name"
+```
+
+Structured metadata on the other hand can be generated from all types of attributes (resource, scope and log):
+
+```yaml
+# [...]
+spec:
+ limits:
+ global:
+ otlp:
+ streamLabels:
+ # [...]
+ structuredMetadata:
+ resourceAttributes:
+ - name: "process.command_line"
+ - name: "k8s\\.pod\\.labels\\..+"
+ regex: true
+ scopeAttributes:
+ - name: "service.name"
+ logAttributes:
+ - name: "http.route"
+```
+
+The previous example also shows that the attribute names can be expressed as _regular expressions_ by setting `regex: true`.
+
+Using a regular expression makes sense when there are many attributes with similar names that should be mapped into Loki. It is not recommended to be used for stream labels, as it can potentially create a lot of data.
+
+### Customizing OpenShift Defaults
+
+The `openshift-logging` tenancy mode contains its own set of default attributes. Some of these attributes (called "required attributes") cannot be removed by applying a custom configuration, because they are needed for other OpenShift components to function properly. Other attributes (called "recommended attributes") are provided but can be disabled in case they influence performance negatively. The complete set of attributes is documented in the [data model][rhobs-data-model] repository.
+
+Because the OpenShift attribute configuration is applied based on the tenancy mode, the simplest configuration is to just set the tenancy mode and not apply any custom attributes. This will provide instant compatibility with the other OpenShift tools.
+
+In case additional attributes are needed, either as stream labels or structured metadata, the normal custom attribute configuration mentioned above can be used. Attributes defined in the custom configuration will be **merged** with the default configuration.
+
+#### Removing Recommended Attributes
+
+In case of issues with the default set of attributes, there is a way to slim down the default set of attributes applied to a LokiStack operating in `openshift-logging` tenancy mode:
+
+```yaml
+# [...]
+spec:
+ tenants:
+ mode: openshift-logging
+ openshift:
+ otlp:
+ disableRecommendedAttributes: true # Set this to remove recommended attributes
+```
+
+Setting `disableRecommendedAttributes: true` reduces the set of default attributes to only the "required attributes".
+
+This option is meant for situations when some of the default attributes cause performance issues during ingestion of logs or if the default set causes excessive use of storage.
+
+Because the set of required attributes only contains a subset of the default stream labels, only setting this option will negatively affect query performance. It needs to be combined with a custom attribute configuration that reintroduces attributes that are needed for queries so that the data contained in those attributes is available again.
+
+## References
+
+- [Loki Labels][loki-labels]
+- [Structured Metadata][loki-structured-metadata]
+- [OpenTelemetry Attribute][otel-attributes]
+- [OpenShift Default Attributes][rhobs-data-model]
+
+[loki-docs-distributor-config]: https://grafana.com/docs/loki/latest/configure/#distributor
+[loki-docs-limits-config]: https://grafana.com/docs/loki/latest/configure/#limits_config
+[loki-labels]: https://grafana.com/docs/loki/latest/get-started/labels/
+[loki-structured-metadata]: https://grafana.com/docs/loki/latest/get-started/labels/structured-metadata/
+[loki-upgrading-schemas]: https://grafana.com/docs/loki/latest/configure/storage/#upgrading-schemas
+[otel-attributes]: https://opentelemetry.io/docs/specs/otel/common/#attribute
+[rhobs-data-model]: https://github.com/rhobs/observability-data-model/blob/main/cluster-logging.md#attributes
diff --git a/operator/go.mod b/operator/go.mod
index 72c7c768e0ab5..5ab042bf45609 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -10,7 +10,7 @@ require (
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/grafana/loki v1.6.2-0.20230403212622-90888a0cc737
- github.com/grafana/loki/operator/apis/loki v0.0.0-00010101000000-000000000000
+ github.com/grafana/loki/operator/api/loki v0.0.0-00010101000000-000000000000
github.com/imdario/mergo v0.3.16
github.com/maxbrunsfeld/counterfeiter/v6 v6.9.0
github.com/openshift/api v0.0.0-20240912201240-0a8800162826 // release-4.17
@@ -162,7 +162,7 @@ require (
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
-replace github.com/grafana/loki/operator/apis/loki => ./apis/loki
+replace github.com/grafana/loki/operator/api/loki => ./api/loki
// Replace v2.4.0+incompatible indirect refs with v5.1.1 for compatibility with google.golang.org/grpc >=v1.56.3
replace github.com/sercand/kuberesolver => github.com/sercand/kuberesolver/v5 v5.1.1
diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml
index 9c67cfc65b6f1..dda953958ae0a 100644
--- a/operator/hack/addons_dev.yaml
+++ b/operator/hack/addons_dev.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:3.2.0-amd64
+ image: docker.io/grafana/logcli:3.2.1-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -73,7 +73,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:3.2.0
+ image: docker.io/grafana/promtail:3.2.1
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml
index 0a00d83044624..435febc7d5c05 100644
--- a/operator/hack/addons_ocp.yaml
+++ b/operator/hack/addons_ocp.yaml
@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: logcli
- image: docker.io/grafana/logcli:3.2.0-amd64
+ image: docker.io/grafana/logcli:3.2.1-amd64
imagePullPolicy: IfNotPresent
command:
- /bin/sh
@@ -70,7 +70,7 @@ spec:
spec:
containers:
- name: promtail
- image: docker.io/grafana/promtail:3.2.0
+ image: docker.io/grafana/promtail:3.2.1
args:
- -config.file=/etc/promtail/promtail.yaml
- -log.level=info
diff --git a/operator/internal/certrotation/build.go b/operator/internal/certrotation/build.go
index 8def711c62dbc..b93c8db3e06bf 100644
--- a/operator/internal/certrotation/build.go
+++ b/operator/internal/certrotation/build.go
@@ -8,7 +8,7 @@ import (
"k8s.io/apiserver/pkg/authentication/user"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
var defaultUserInfo = &user.DefaultInfo{Name: "system:lokistacks", Groups: []string{"system:logging"}}
diff --git a/operator/internal/certrotation/build_test.go b/operator/internal/certrotation/build_test.go
index 47845765eb49c..0e02d83b5bb94 100644
--- a/operator/internal/certrotation/build_test.go
+++ b/operator/internal/certrotation/build_test.go
@@ -10,7 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
func TestBuildAll(t *testing.T) {
diff --git a/operator/internal/certrotation/options.go b/operator/internal/certrotation/options.go
index 5954ec678517d..b6a878ff15b64 100644
--- a/operator/internal/certrotation/options.go
+++ b/operator/internal/certrotation/options.go
@@ -8,7 +8,7 @@ import (
"github.com/openshift/library-go/pkg/crypto"
corev1 "k8s.io/api/core/v1"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
// ComponentCertificates is a map of lokistack component names to TLS certificates
diff --git a/operator/internal/certrotation/target_test.go b/operator/internal/certrotation/target_test.go
index 71efc5e70cfdf..dfefb329eea3d 100644
--- a/operator/internal/certrotation/target_test.go
+++ b/operator/internal/certrotation/target_test.go
@@ -10,7 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/cert"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
func TestCertificatesExpired(t *testing.T) {
diff --git a/operator/internal/config/loader.go b/operator/internal/config/loader.go
index b5af090ddb887..d3e7fc21dddd5 100644
--- a/operator/internal/config/loader.go
+++ b/operator/internal/config/loader.go
@@ -8,7 +8,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
var errConfigFileLoading = errors.New("could not read file at path")
diff --git a/operator/internal/config/options.go b/operator/internal/config/options.go
index 33ce3cfde91f2..8c2f1b42ae864 100644
--- a/operator/internal/config/options.go
+++ b/operator/internal/config/options.go
@@ -12,7 +12,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
)
// LoadConfig initializes the controller configuration, optionally overriding the defaults
diff --git a/operator/controllers/loki/alertingrule_controller.go b/operator/internal/controller/loki/alertingrule_controller.go
similarity index 92%
rename from operator/controllers/loki/alertingrule_controller.go
rename to operator/internal/controller/loki/alertingrule_controller.go
index 8840141d63e5e..84890d553100c 100644
--- a/operator/controllers/loki/alertingrule_controller.go
+++ b/operator/internal/controller/loki/alertingrule_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -11,8 +11,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/lokistack"
)
// AlertingRuleReconciler reconciles a AlertingRule object
diff --git a/operator/controllers/loki/certrotation_controller.go b/operator/internal/controller/loki/certrotation_controller.go
similarity index 91%
rename from operator/controllers/loki/certrotation_controller.go
rename to operator/internal/controller/loki/certrotation_controller.go
index d682d489c6691..b677a32b5990e 100644
--- a/operator/controllers/loki/certrotation_controller.go
+++ b/operator/internal/controller/loki/certrotation_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -11,11 +11,11 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
- "github.com/grafana/loki/operator/controllers/loki/internal/management/state"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/lokistack"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/management/state"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers"
)
diff --git a/operator/controllers/loki/certrotation_controller_test.go b/operator/internal/controller/loki/certrotation_controller_test.go
similarity index 95%
rename from operator/controllers/loki/certrotation_controller_test.go
rename to operator/internal/controller/loki/certrotation_controller_test.go
index a33b8226216e3..7e1936b924877 100644
--- a/operator/controllers/loki/certrotation_controller_test.go
+++ b/operator/internal/controller/loki/certrotation_controller_test.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"testing"
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/controllers/loki/dashboards_controller.go b/operator/internal/controller/loki/dashboards_controller.go
similarity index 97%
rename from operator/controllers/loki/dashboards_controller.go
rename to operator/internal/controller/loki/dashboards_controller.go
index fde3f107bcd6a..87d2ee15c0dd3 100644
--- a/operator/controllers/loki/dashboards_controller.go
+++ b/operator/internal/controller/loki/dashboards_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -14,7 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/handlers"
)
diff --git a/operator/controllers/loki/internal/lokistack/certrotation_discovery.go b/operator/internal/controller/loki/internal/lokistack/certrotation_discovery.go
similarity index 95%
rename from operator/controllers/loki/internal/lokistack/certrotation_discovery.go
rename to operator/internal/controller/loki/internal/lokistack/certrotation_discovery.go
index 1b16b580e6059..377aadf6ee826 100644
--- a/operator/controllers/loki/internal/lokistack/certrotation_discovery.go
+++ b/operator/internal/controller/loki/internal/lokistack/certrotation_discovery.go
@@ -9,7 +9,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go b/operator/internal/controller/loki/internal/lokistack/ruler_config_discovery.go
similarity index 97%
rename from operator/controllers/loki/internal/lokistack/ruler_config_discovery.go
rename to operator/internal/controller/loki/internal/lokistack/ruler_config_discovery.go
index 6d1852ca03e54..06249caa03230 100644
--- a/operator/controllers/loki/internal/lokistack/ruler_config_discovery.go
+++ b/operator/internal/controller/loki/internal/lokistack/ruler_config_discovery.go
@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/controllers/loki/internal/lokistack/rules_discovery.go b/operator/internal/controller/loki/internal/lokistack/rules_discovery.go
similarity index 95%
rename from operator/controllers/loki/internal/lokistack/rules_discovery.go
rename to operator/internal/controller/loki/internal/lokistack/rules_discovery.go
index ed51722d07bc9..9ea2821ec03df 100644
--- a/operator/controllers/loki/internal/lokistack/rules_discovery.go
+++ b/operator/internal/controller/loki/internal/lokistack/rules_discovery.go
@@ -8,7 +8,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/controllers/loki/internal/lokistack/update.go b/operator/internal/controller/loki/internal/lokistack/update.go
similarity index 96%
rename from operator/controllers/loki/internal/lokistack/update.go
rename to operator/internal/controller/loki/internal/lokistack/update.go
index b16ed815a7310..24fd3dafe6cd6 100644
--- a/operator/controllers/loki/internal/lokistack/update.go
+++ b/operator/internal/controller/loki/internal/lokistack/update.go
@@ -7,7 +7,7 @@ import (
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/controllers/loki/internal/management/state/state.go b/operator/internal/controller/loki/internal/management/state/state.go
similarity index 92%
rename from operator/controllers/loki/internal/management/state/state.go
rename to operator/internal/controller/loki/internal/management/state/state.go
index 759c26e57ba43..13204043ea67c 100644
--- a/operator/controllers/loki/internal/management/state/state.go
+++ b/operator/internal/controller/loki/internal/management/state/state.go
@@ -7,7 +7,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/controllers/loki/internal/management/state/state_test.go b/operator/internal/controller/loki/internal/management/state/state_test.go
similarity index 91%
rename from operator/controllers/loki/internal/management/state/state_test.go
rename to operator/internal/controller/loki/internal/management/state/state_test.go
index ab9f00f8b3545..f6f8a9241d0ed 100644
--- a/operator/controllers/loki/internal/management/state/state_test.go
+++ b/operator/internal/controller/loki/internal/management/state/state_test.go
@@ -1,4 +1,4 @@
-package state_test
+package state
import (
"context"
@@ -13,8 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/management/state"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
@@ -73,7 +72,7 @@ func TestIsManaged(t *testing.T) {
k.SetClientObject(object, &tst.stack)
return nil
}
- ok, err := state.IsManaged(context.TODO(), r, k)
+ ok, err := IsManaged(context.TODO(), r, k)
require.NoError(t, err)
require.Equal(t, ok, tst.wantOk)
})
@@ -109,7 +108,7 @@ func TestIsManaged_WhenError_ReturnNotManagedWithError(t *testing.T) {
for _, tst := range table {
t.Run(tst.name, func(t *testing.T) {
k.GetReturns(tst.apierror)
- ok, err := state.IsManaged(context.TODO(), r, k)
+ ok, err := IsManaged(context.TODO(), r, k)
require.Equal(t, tst.wantErr, err)
require.False(t, ok)
})
diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/internal/controller/loki/lokistack_controller.go
similarity index 98%
rename from operator/controllers/loki/lokistack_controller.go
rename to operator/internal/controller/loki/lokistack_controller.go
index 59c9965cb9fc5..5c7c376d3bd06 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/internal/controller/loki/lokistack_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -26,10 +26,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/management/state"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/config"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/management/state"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers"
manifestsocp "github.com/grafana/loki/operator/internal/manifests/openshift"
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/internal/controller/loki/lokistack_controller_test.go
similarity index 98%
rename from operator/controllers/loki/lokistack_controller_test.go
rename to operator/internal/controller/loki/lokistack_controller_test.go
index a4efc5d3ed45c..0af66748f41d5 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/internal/controller/loki/lokistack_controller_test.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"flag"
@@ -22,8 +22,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/controllers/loki/lokistack_zone_labeling_controller.go b/operator/internal/controller/loki/lokistack_zone_labeling_controller.go
similarity index 97%
rename from operator/controllers/loki/lokistack_zone_labeling_controller.go
rename to operator/internal/controller/loki/lokistack_zone_labeling_controller.go
index 1012f6a2f5dc6..a45c06a860f3f 100644
--- a/operator/controllers/loki/lokistack_zone_labeling_controller.go
+++ b/operator/internal/controller/loki/lokistack_zone_labeling_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -14,7 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers"
)
diff --git a/operator/controllers/loki/lokistack_zone_labeling_controller_test.go b/operator/internal/controller/loki/lokistack_zone_labeling_controller_test.go
similarity index 98%
rename from operator/controllers/loki/lokistack_zone_labeling_controller_test.go
rename to operator/internal/controller/loki/lokistack_zone_labeling_controller_test.go
index 5fffbe894bf91..a0ec40498de8e 100644
--- a/operator/controllers/loki/lokistack_zone_labeling_controller_test.go
+++ b/operator/internal/controller/loki/lokistack_zone_labeling_controller_test.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"testing"
diff --git a/operator/controllers/loki/recordingrule_controller.go b/operator/internal/controller/loki/recordingrule_controller.go
similarity index 92%
rename from operator/controllers/loki/recordingrule_controller.go
rename to operator/internal/controller/loki/recordingrule_controller.go
index adb6dbf80f194..75a2c971f1eb9 100644
--- a/operator/controllers/loki/recordingrule_controller.go
+++ b/operator/internal/controller/loki/recordingrule_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -11,8 +11,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/lokistack"
)
// RecordingRuleReconciler reconciles a RecordingRule object
diff --git a/operator/controllers/loki/rulerconfig_controller.go b/operator/internal/controller/loki/rulerconfig_controller.go
similarity index 92%
rename from operator/controllers/loki/rulerconfig_controller.go
rename to operator/internal/controller/loki/rulerconfig_controller.go
index 5e827b54e2ffa..901ca5b65d0e1 100644
--- a/operator/controllers/loki/rulerconfig_controller.go
+++ b/operator/internal/controller/loki/rulerconfig_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package loki
import (
"context"
@@ -8,8 +8,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/internal/controller/loki/internal/lokistack"
)
// RulerConfigReconciler reconciles a RulerConfig object
diff --git a/operator/internal/handlers/credentialsrequest.go b/operator/internal/handlers/credentialsrequest.go
index 9096f15d4026f..7d25bde2c9d27 100644
--- a/operator/internal/handlers/credentialsrequest.go
+++ b/operator/internal/handlers/credentialsrequest.go
@@ -13,7 +13,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
diff --git a/operator/internal/handlers/credentialsrequest_test.go b/operator/internal/handlers/credentialsrequest_test.go
index dfb63cf166820..d71995d3de2b7 100644
--- a/operator/internal/handlers/credentialsrequest_test.go
+++ b/operator/internal/handlers/credentialsrequest_test.go
@@ -13,7 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/config"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/dashboards_create_test.go b/operator/internal/handlers/dashboards_create_test.go
index f897b45841543..71001c4760bd7 100644
--- a/operator/internal/handlers/dashboards_create_test.go
+++ b/operator/internal/handlers/dashboards_create_test.go
@@ -13,7 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/internal/certificates/options.go b/operator/internal/handlers/internal/certificates/options.go
index 4aa9d1871112e..b978ae3b0e9f0 100644
--- a/operator/internal/handlers/internal/certificates/options.go
+++ b/operator/internal/handlers/internal/certificates/options.go
@@ -10,7 +10,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/certrotation"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/handlers/internal/certificates/options_test.go b/operator/internal/handlers/internal/certificates/options_test.go
index 56f2080f74a86..de578a6ca5b68 100644
--- a/operator/internal/handlers/internal/certificates/options_test.go
+++ b/operator/internal/handlers/internal/certificates/options_test.go
@@ -15,7 +15,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go
index 893659ca5d29b..c1170b70f7edc 100644
--- a/operator/internal/handlers/internal/gateway/base_domain.go
+++ b/operator/internal/handlers/internal/gateway/base_domain.go
@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/internal/gateway/gateway.go b/operator/internal/handlers/internal/gateway/gateway.go
index 0b05801f2e9aa..d37c82866af35 100644
--- a/operator/internal/handlers/internal/gateway/gateway.go
+++ b/operator/internal/handlers/internal/gateway/gateway.go
@@ -6,8 +6,8 @@ import (
"github.com/go-logr/logr"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/openshift"
"github.com/grafana/loki/operator/internal/manifests"
diff --git a/operator/internal/handlers/internal/gateway/gateway_test.go b/operator/internal/handlers/internal/gateway/gateway_test.go
index 2c8f846f55825..59dcf5821ab40 100644
--- a/operator/internal/handlers/internal/gateway/gateway_test.go
+++ b/operator/internal/handlers/internal/gateway/gateway_test.go
@@ -15,8 +15,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/internal/gateway/modes.go b/operator/internal/handlers/internal/gateway/modes.go
index 8fd9855b352dc..46a271361b939 100644
--- a/operator/internal/handlers/internal/gateway/modes.go
+++ b/operator/internal/handlers/internal/gateway/modes.go
@@ -3,7 +3,7 @@ package gateway
import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func validateModes(stack *lokiv1.LokiStack) error {
diff --git a/operator/internal/handlers/internal/gateway/modes_test.go b/operator/internal/handlers/internal/gateway/modes_test.go
index 0d6dd9eb88cd6..20ae7c86ca4da 100644
--- a/operator/internal/handlers/internal/gateway/modes_test.go
+++ b/operator/internal/handlers/internal/gateway/modes_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestValidateModes_StaticMode(t *testing.T) {
diff --git a/operator/internal/handlers/internal/gateway/tenant_configsecret.go b/operator/internal/handlers/internal/gateway/tenant_configsecret.go
index f4e6c493bc069..252ebe7b34495 100644
--- a/operator/internal/handlers/internal/gateway/tenant_configsecret.go
+++ b/operator/internal/handlers/internal/gateway/tenant_configsecret.go
@@ -9,7 +9,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
index 15e85a2295465..5c1020fd48b3d 100644
--- a/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go
@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go
index 6cc39ae05e254..84bcf1b8c07f1 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go
@@ -9,7 +9,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/status"
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
index 52c1476059174..7094416b03028 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets_test.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/openshift/alertmanager.go b/operator/internal/handlers/internal/openshift/alertmanager.go
index d470bf6ffe6df..0ee614d596549 100644
--- a/operator/internal/handlers/internal/openshift/alertmanager.go
+++ b/operator/internal/handlers/internal/openshift/alertmanager.go
@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/handlers/internal/openshift/proxy.go b/operator/internal/handlers/internal/openshift/proxy.go
index 4f5c2c022e901..10d2330c104e6 100644
--- a/operator/internal/handlers/internal/openshift/proxy.go
+++ b/operator/internal/handlers/internal/openshift/proxy.go
@@ -7,7 +7,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/handlers/internal/rules/cleanup.go b/operator/internal/handlers/internal/rules/cleanup.go
index abd5bacd5c032..faabe70d4ea82 100644
--- a/operator/internal/handlers/internal/rules/cleanup.go
+++ b/operator/internal/handlers/internal/rules/cleanup.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
- v1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/rules/cleanup_test.go b/operator/internal/handlers/internal/rules/cleanup_test.go
index 5ecd9f69c345f..18242877c578b 100644
--- a/operator/internal/handlers/internal/rules/cleanup_test.go
+++ b/operator/internal/handlers/internal/rules/cleanup_test.go
@@ -17,7 +17,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/internal/rules/config.go b/operator/internal/handlers/internal/rules/config.go
index ec4413fc49ecd..e983be441f75f 100644
--- a/operator/internal/handlers/internal/rules/config.go
+++ b/operator/internal/handlers/internal/rules/config.go
@@ -7,7 +7,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/handlers/internal/rules/rules.go b/operator/internal/handlers/internal/rules/rules.go
index e21335e98c095..93196a98c0526 100644
--- a/operator/internal/handlers/internal/rules/rules.go
+++ b/operator/internal/handlers/internal/rules/rules.go
@@ -11,7 +11,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/openshift"
"github.com/grafana/loki/operator/internal/manifests"
diff --git a/operator/internal/handlers/internal/rules/rules_test.go b/operator/internal/handlers/internal/rules/rules_test.go
index e33a2ac928a6a..c5f4b17ea8047 100644
--- a/operator/internal/handlers/internal/rules/rules_test.go
+++ b/operator/internal/handlers/internal/rules/rules_test.go
@@ -14,7 +14,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/internal/rules/secrets.go b/operator/internal/handlers/internal/rules/secrets.go
index 311bd139c615c..09b103c41cb46 100644
--- a/operator/internal/handlers/internal/rules/secrets.go
+++ b/operator/internal/handlers/internal/rules/secrets.go
@@ -4,7 +4,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/rules/secrets_test.go b/operator/internal/handlers/internal/rules/secrets_test.go
index 118cc0c8e0fb9..b59905ca93451 100644
--- a/operator/internal/handlers/internal/rules/secrets_test.go
+++ b/operator/internal/handlers/internal/rules/secrets_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/handlers/internal/storage/ca_configmap.go b/operator/internal/handlers/internal/storage/ca_configmap.go
index 904e63373a207..4935a31355de8 100644
--- a/operator/internal/handlers/internal/storage/ca_configmap.go
+++ b/operator/internal/handlers/internal/storage/ca_configmap.go
@@ -10,7 +10,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/internal/storage/secrets.go b/operator/internal/handlers/internal/storage/secrets.go
index 2b591ba34f3f1..36065afb4c8ab 100644
--- a/operator/internal/handlers/internal/storage/secrets.go
+++ b/operator/internal/handlers/internal/storage/secrets.go
@@ -17,8 +17,8 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/grafana/loki/operator/internal/status"
diff --git a/operator/internal/handlers/internal/storage/secrets_test.go b/operator/internal/handlers/internal/storage/secrets_test.go
index a3d1428809466..0688b099f3a86 100644
--- a/operator/internal/handlers/internal/storage/secrets_test.go
+++ b/operator/internal/handlers/internal/storage/secrets_test.go
@@ -7,8 +7,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/handlers/internal/storage/storage.go b/operator/internal/handlers/internal/storage/storage.go
index 6276942fe32e8..af7dbc726437d 100644
--- a/operator/internal/handlers/internal/storage/storage.go
+++ b/operator/internal/handlers/internal/storage/storage.go
@@ -5,8 +5,8 @@ import (
"fmt"
"time"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/grafana/loki/operator/internal/status"
diff --git a/operator/internal/handlers/internal/storage/storage_test.go b/operator/internal/handlers/internal/storage/storage_test.go
index 19a7147c877a0..fe483656e7147 100644
--- a/operator/internal/handlers/internal/storage/storage_test.go
+++ b/operator/internal/handlers/internal/storage/storage_test.go
@@ -15,8 +15,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/internal/tlsprofile/tlsprofile.go b/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
index 01d6d907c86dd..0b5493596ba82 100644
--- a/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
+++ b/operator/internal/handlers/internal/tlsprofile/tlsprofile.go
@@ -7,7 +7,7 @@ import (
openshiftconfigv1 "github.com/openshift/api/config/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
index bdecb62ee4154..1a0397e75a319 100644
--- a/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
+++ b/operator/internal/handlers/internal/tlsprofile/tlsprofile_test.go
@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile"
)
diff --git a/operator/internal/handlers/lokistack_check_cert_expiry.go b/operator/internal/handlers/lokistack_check_cert_expiry.go
index 4fe8a275b4da4..70d0b6fa75a4a 100644
--- a/operator/internal/handlers/lokistack_check_cert_expiry.go
+++ b/operator/internal/handlers/lokistack_check_cert_expiry.go
@@ -8,8 +8,8 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/certrotation"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/certificates"
diff --git a/operator/internal/handlers/lokistack_check_cert_expiry_test.go b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
index 6b6d35c8b6bcd..84cb54f78a68d 100644
--- a/operator/internal/handlers/lokistack_check_cert_expiry_test.go
+++ b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/certrotation"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index ee0eea1513af0..e834fac0282ab 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -15,8 +15,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index c7677e49a05a4..b2a64e39fa922 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -25,8 +25,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/handlers/lokistack_enable_zone_awareness.go b/operator/internal/handlers/lokistack_enable_zone_awareness.go
index c66f54af5bc04..97e178d819e64 100644
--- a/operator/internal/handlers/lokistack_enable_zone_awareness.go
+++ b/operator/internal/handlers/lokistack_enable_zone_awareness.go
@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/handlers/lokistack_enable_zone_awareness_test.go b/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
index f0c0f02be342b..dd3767408d83c 100644
--- a/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
+++ b/operator/internal/handlers/lokistack_enable_zone_awareness_test.go
@@ -13,7 +13,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/handlers/lokistack_rotate_certs.go b/operator/internal/handlers/lokistack_rotate_certs.go
index 2f7b088abd043..7ed6d85827ed1 100644
--- a/operator/internal/handlers/lokistack_rotate_certs.go
+++ b/operator/internal/handlers/lokistack_rotate_certs.go
@@ -12,8 +12,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/certrotation"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/certificates"
diff --git a/operator/internal/handlers/lokistack_rotate_certs_test.go b/operator/internal/handlers/lokistack_rotate_certs_test.go
index 7ace87de85cd1..e204c99b9217b 100644
--- a/operator/internal/handlers/lokistack_rotate_certs_test.go
+++ b/operator/internal/handlers/lokistack_rotate_certs_test.go
@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go
index cc0da5771c0c1..99b0d5daa597e 100644
--- a/operator/internal/manifests/build.go
+++ b/operator/internal/manifests/build.go
@@ -7,7 +7,7 @@ import (
"github.com/openshift/library-go/pkg/crypto"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
)
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index dd8d8d22a5aa6..536e40df04629 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -11,8 +11,8 @@ import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
)
diff --git a/operator/internal/manifests/compactor_test.go b/operator/internal/manifests/compactor_test.go
index 6839d64a74263..ac636b364a6f4 100644
--- a/operator/internal/manifests/compactor_test.go
+++ b/operator/internal/manifests/compactor_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index e212e6d6a30fe..fcaac3ab9a2bb 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -8,7 +8,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
)
@@ -194,6 +194,7 @@ func ConfigOptions(opt Options) config.Options {
ObjectStorage: opt.ObjectStorage,
HTTPTimeouts: opt.Timeouts.Loki,
EnableRemoteReporting: opt.Gates.GrafanaLabsUsageReport,
+ DiscoverLogLevels: discoverLogLevels(&opt.Stack),
Ruler: config.Ruler{
Enabled: rulerEnabled,
RulesStorageDirectory: rulesStorageDirectory,
@@ -202,8 +203,9 @@ func ConfigOptions(opt Options) config.Options {
AlertManager: amConfig,
RemoteWrite: rwConfig,
},
- Retention: retentionConfig(&opt.Stack),
- Overrides: overrides,
+ Retention: retentionConfig(&opt.Stack),
+ OTLPAttributes: otlpAttributeConfig(&opt.Stack),
+ Overrides: overrides,
}
}
@@ -395,3 +397,16 @@ func retentionConfig(ls *lokiv1.LokiStackSpec) config.RetentionOptions {
DeleteWorkerCount: deleteWorkerCountMap[ls.Size],
}
}
+
+func discoverLogLevels(ls *lokiv1.LokiStackSpec) bool {
+ if ls.Tenants == nil {
+ return true
+ }
+
+ if ls.Tenants.Mode == lokiv1.OpenshiftLogging ||
+ ls.Tenants.Mode == lokiv1.OpenshiftNetwork {
+ return false
+ }
+
+ return true
+}
diff --git a/operator/internal/manifests/config_otlp.go b/operator/internal/manifests/config_otlp.go
new file mode 100644
index 0000000000000..8530b7e63cfb6
--- /dev/null
+++ b/operator/internal/manifests/config_otlp.go
@@ -0,0 +1,273 @@
+package manifests
+
+import (
+ "slices"
+ "strings"
+
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
+)
+
+func defaultOTLPAttributeConfig(ts *lokiv1.TenantsSpec) config.OTLPAttributeConfig {
+ if ts == nil || ts.Mode != lokiv1.OpenshiftLogging {
+ return config.OTLPAttributeConfig{}
+ }
+
+ disableRecommended := false
+ if ts.Openshift != nil && ts.Openshift.OTLP != nil {
+ disableRecommended = ts.Openshift.OTLP.DisableRecommendedAttributes
+ }
+
+ return openshift.DefaultOTLPAttributes(disableRecommended)
+}
+
+func convertAttributeReferences(refs []lokiv1.OTLPAttributeReference, action config.OTLPAttributeAction) []config.OTLPAttribute {
+ var (
+ names []string
+ result []config.OTLPAttribute
+ )
+
+ for _, attr := range refs {
+ if attr.Regex {
+ result = append(result, config.OTLPAttribute{
+ Action: action,
+ Regex: attr.Name,
+ })
+ continue
+ }
+
+ names = append(names, attr.Name)
+ }
+
+ if len(names) > 0 {
+ result = append(result, config.OTLPAttribute{
+ Action: action,
+ Names: names,
+ })
+ }
+
+ return result
+}
+
+func copyOTLPAttributes(in []config.OTLPAttribute) []config.OTLPAttribute {
+ result := make([]config.OTLPAttribute, 0, len(in))
+ for _, attr := range in {
+ result = append(result, config.OTLPAttribute{
+ Action: attr.Action,
+ Names: slices.Clone(attr.Names),
+ Regex: attr.Regex,
+ })
+ }
+
+ return result
+}
+
+func copyTenantAttributeConfig(in *config.OTLPTenantAttributeConfig) *config.OTLPTenantAttributeConfig {
+ result := &config.OTLPTenantAttributeConfig{}
+ if in == nil {
+ return result
+ }
+
+ if len(in.ResourceAttributes) > 0 {
+ result.ResourceAttributes = copyOTLPAttributes(in.ResourceAttributes)
+ }
+
+ if len(in.ScopeAttributes) > 0 {
+ result.ScopeAttributes = copyOTLPAttributes(in.ScopeAttributes)
+ }
+
+ if len(in.LogAttributes) > 0 {
+ result.LogAttributes = copyOTLPAttributes(in.LogAttributes)
+ }
+
+ return result
+}
+
+func convertTenantAttributeReferences(otlpSpec *lokiv1.OTLPSpec, base *config.OTLPTenantAttributeConfig) *config.OTLPTenantAttributeConfig {
+ result := copyTenantAttributeConfig(base)
+
+ if streamLabels := otlpSpec.StreamLabels; streamLabels != nil {
+ result.ResourceAttributes = append(result.ResourceAttributes,
+ convertAttributeReferences(streamLabels.ResourceAttributes, config.OTLPAttributeActionStreamLabel)...)
+ }
+
+ if structuredMetadata := otlpSpec.StructuredMetadata; structuredMetadata != nil {
+ if resAttr := structuredMetadata.ResourceAttributes; len(resAttr) > 0 {
+ result.ResourceAttributes = append(result.ResourceAttributes,
+ convertAttributeReferences(resAttr, config.OTLPAttributeActionMetadata)...)
+ }
+
+ if scopeAttr := structuredMetadata.ScopeAttributes; len(scopeAttr) > 0 {
+ result.ScopeAttributes = append(result.ScopeAttributes,
+ convertAttributeReferences(scopeAttr, config.OTLPAttributeActionMetadata)...)
+ }
+
+ if logAttr := structuredMetadata.LogAttributes; len(logAttr) > 0 {
+ result.LogAttributes = append(result.LogAttributes,
+ convertAttributeReferences(logAttr, config.OTLPAttributeActionMetadata)...)
+ }
+ }
+
+ return result
+}
+
+func sortAndDeduplicateOTLPConfig(cfg config.OTLPAttributeConfig) config.OTLPAttributeConfig {
+ if cfg.Global != nil {
+ if len(cfg.Global.ResourceAttributes) > 0 {
+ cfg.Global.ResourceAttributes = sortAndDeduplicateOTLPAttributes(cfg.Global.ResourceAttributes)
+ }
+
+ if len(cfg.Global.ScopeAttributes) > 0 {
+ cfg.Global.ScopeAttributes = sortAndDeduplicateOTLPAttributes(cfg.Global.ScopeAttributes)
+ }
+
+ if len(cfg.Global.LogAttributes) > 0 {
+ cfg.Global.LogAttributes = sortAndDeduplicateOTLPAttributes(cfg.Global.LogAttributes)
+ }
+ }
+
+ for _, t := range cfg.Tenants {
+ if len(t.ResourceAttributes) > 0 {
+ t.ResourceAttributes = sortAndDeduplicateOTLPAttributes(t.ResourceAttributes)
+ }
+
+ if len(t.ScopeAttributes) > 0 {
+ t.ScopeAttributes = sortAndDeduplicateOTLPAttributes(t.ScopeAttributes)
+ }
+
+ if len(t.LogAttributes) > 0 {
+ t.LogAttributes = sortAndDeduplicateOTLPAttributes(t.LogAttributes)
+ }
+ }
+
+ return cfg
+}
+
+func sortAndDeduplicateOTLPAttributes(attrs []config.OTLPAttribute) []config.OTLPAttribute {
+ if len(attrs) == 0 {
+ // Skip everything for zero items
+ return attrs
+ }
+
+ if len(attrs[0].Names) > 1 {
+ // If the first OTLPAttribute has names, then sort and de-duplicate them
+ slices.Sort(attrs[0].Names)
+ attrs[0].Names = slices.Compact(attrs[0].Names)
+ }
+
+ if len(attrs) == 1 {
+ // If there's only one item, then skip the complex sorting
+ return attrs
+ }
+
+ slices.SortFunc(attrs, func(a, b config.OTLPAttribute) int {
+ action := strings.Compare(string(a.Action), string(b.Action))
+ if action != 0 {
+ return action
+ }
+
+ if a.Regex != "" && b.Regex != "" {
+ return strings.Compare(a.Regex, b.Regex)
+ }
+
+ if a.Regex != "" && b.Regex == "" {
+ return 1
+ }
+
+ if a.Regex == "" && b.Regex != "" {
+ return -1
+ }
+
+ return 0
+ })
+
+ for i := 0; i < len(attrs)-1; i++ {
+ a := attrs[i]
+ if a.Regex != "" {
+ continue
+ }
+
+ slices.Sort(a.Names)
+ attrs[i] = a
+
+ next := attrs[i+1]
+ if next.Regex != "" {
+ continue
+ }
+
+ if a.Action != next.Action {
+ continue
+ }
+
+ // Combine attribute definitions if they have the same action and just contain names
+ a.Names = append(a.Names, next.Names...)
+ slices.Sort(a.Names)
+ a.Names = slices.Compact(a.Names)
+
+ // Remove the "next" attribute definition
+ attrs[i] = a
+ attrs = append(attrs[:i+1], attrs[i+2:]...)
+ i--
+ }
+
+ return attrs
+}
+
+func otlpAttributeConfig(ls *lokiv1.LokiStackSpec) config.OTLPAttributeConfig {
+ result := defaultOTLPAttributeConfig(ls.Tenants)
+
+ if ls.Limits != nil {
+ if ls.Limits.Global != nil && ls.Limits.Global.OTLP != nil {
+ result.RemoveDefaultLabels = true
+ globalOTLP := ls.Limits.Global.OTLP
+
+ if streamLabels := globalOTLP.StreamLabels; streamLabels != nil {
+ if result.Global == nil {
+ result.Global = &config.OTLPTenantAttributeConfig{}
+ }
+
+ if resAttr := streamLabels.ResourceAttributes; len(resAttr) > 0 {
+ result.Global.ResourceAttributes = append(result.Global.ResourceAttributes,
+ convertAttributeReferences(resAttr, config.OTLPAttributeActionStreamLabel)...)
+ }
+ }
+
+ if structuredMetadata := globalOTLP.StructuredMetadata; structuredMetadata != nil {
+ if result.Global == nil {
+ result.Global = &config.OTLPTenantAttributeConfig{}
+ }
+
+ if resAttr := structuredMetadata.ResourceAttributes; len(resAttr) > 0 {
+ result.Global.ResourceAttributes = append(result.Global.ResourceAttributes,
+ convertAttributeReferences(resAttr, config.OTLPAttributeActionMetadata)...)
+ }
+
+ if scopeAttr := structuredMetadata.ScopeAttributes; len(scopeAttr) > 0 {
+ result.Global.ScopeAttributes = append(result.Global.ScopeAttributes,
+ convertAttributeReferences(scopeAttr, config.OTLPAttributeActionMetadata)...)
+ }
+
+ if logAttr := structuredMetadata.LogAttributes; len(logAttr) > 0 {
+ result.Global.LogAttributes = append(result.Global.LogAttributes,
+ convertAttributeReferences(logAttr, config.OTLPAttributeActionMetadata)...)
+ }
+ }
+ }
+
+ for tenant, tenantLimits := range ls.Limits.Tenants {
+ if tenantLimits.OTLP != nil {
+ result.RemoveDefaultLabels = true
+
+ if result.Tenants == nil {
+ result.Tenants = map[string]*config.OTLPTenantAttributeConfig{}
+ }
+
+ tenantResult := convertTenantAttributeReferences(tenantLimits.OTLP, result.Global)
+ result.Tenants[tenant] = tenantResult
+ }
+ }
+ }
+
+ return sortAndDeduplicateOTLPConfig(result)
+}
diff --git a/operator/internal/manifests/config_otlp_test.go b/operator/internal/manifests/config_otlp_test.go
new file mode 100644
index 0000000000000..73f4970c8dbf0
--- /dev/null
+++ b/operator/internal/manifests/config_otlp_test.go
@@ -0,0 +1,799 @@
+package manifests
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
+)
+
+func TestOtlpAttributeConfig(t *testing.T) {
+ tt := []struct {
+ desc string
+ spec lokiv1.LokiStackSpec
+ wantConfig config.OTLPAttributeConfig
+ }{
+ {
+ desc: "empty",
+ },
+ {
+ desc: "global stream label",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "global stream label regex",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "stream\\.label\\.regex\\..+",
+ Regex: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "stream\\.label\\.regex\\..+",
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "global metadata",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "metadata",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"metadata"},
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "global combined",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "stream.label",
+ },
+ {
+ Name: "stream\\.label\\.regex\\..+",
+ Regex: true,
+ },
+ },
+ },
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "resource.metadata",
+ },
+ {
+ Name: "resource.metadata\\.other\\..+",
+ Regex: true,
+ },
+ },
+ ScopeAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "scope.metadata",
+ },
+ {
+ Name: "scope.metadata\\.other\\..+",
+ Regex: true,
+ },
+ },
+ LogAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "log.metadata",
+ },
+ {
+ Name: "log.metadata\\.other\\..+",
+ Regex: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"stream.label"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "stream\\.label\\.regex\\..+",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"resource.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "resource.metadata\\.other\\..+",
+ },
+ },
+ ScopeAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"scope.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "scope.metadata\\.other\\..+",
+ },
+ },
+ LogAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"log.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "log.metadata\\.other\\..+",
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "tenant stream label",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "test-tenant": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"tenant.stream.label"},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "tenant stream label regex",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant\\.stream\\.label\\.regex\\..+",
+ Regex: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "test-tenant": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "tenant\\.stream\\.label\\.regex\\..+",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "tenant metadata",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.metadata",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "test-tenant": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"tenant.metadata"},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "tenant combined",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.stream.label",
+ },
+ {
+ Name: `tenant\.stream\.label\.regex\..+`,
+ Regex: true,
+ },
+ },
+ },
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.resource.metadata",
+ },
+ {
+ Name: `tenant\.resource.metadata\.other\..+`,
+ Regex: true,
+ },
+ },
+ ScopeAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.scope.metadata",
+ },
+ {
+ Name: `tenant\.scope\.metadata\.other\..+`,
+ Regex: true,
+ },
+ },
+ LogAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.log.metadata",
+ },
+ {
+ Name: `tenant\.log\.metadata\.other\..+`,
+ Regex: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "test-tenant": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"tenant.stream.label"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "tenant\\.stream\\.label\\.regex\\..+",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"tenant.resource.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `tenant\.resource.metadata\.other\..+`,
+ },
+ },
+ ScopeAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"tenant.scope.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `tenant\.scope\.metadata\.other\..+`,
+ },
+ },
+ LogAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"tenant.log.metadata"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `tenant\.log\.metadata\.other\..+`,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "global and tenant configuration with de-duplication",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "global.stream.label",
+ },
+ {
+ Name: "another.stream.label",
+ },
+ },
+ },
+ },
+ },
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.stream.label",
+ },
+ {
+ Name: "another.stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "another.stream.label",
+ "global.stream.label",
+ },
+ },
+ },
+ },
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "test-tenant": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "another.stream.label",
+ "global.stream.label",
+ "tenant.stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "openshift-logging defaults",
+ spec: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ },
+ wantConfig: openshift.DefaultOTLPAttributes(false),
+ },
+ {
+ desc: "openshift-logging defaults without recommended",
+ spec: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ Openshift: &lokiv1.OpenshiftTenantSpec{
+ OTLP: &lokiv1.OpenshiftOTLPConfig{
+ DisableRecommendedAttributes: true,
+ },
+ },
+ },
+ },
+ wantConfig: openshift.DefaultOTLPAttributes(true),
+ },
+ {
+ desc: "openshift-logging defaults with additional custom attributes",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "custom.stream.label",
+ },
+ },
+ },
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ LogAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "custom.log.metadata",
+ },
+ },
+ },
+ },
+ },
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "application": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "custom.application.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ Openshift: &lokiv1.OpenshiftTenantSpec{
+ OTLP: &lokiv1.OpenshiftOTLPConfig{
+ DisableRecommendedAttributes: true,
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "custom.stream.label",
+ "k8s.namespace.name",
+ "kubernetes.namespace_name",
+ "log_source",
+ "log_type",
+ "openshift.cluster.uid",
+ "openshift.log.source",
+ "openshift.log.type",
+ },
+ },
+ },
+ LogAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"custom.log.metadata"},
+ },
+ },
+ },
+ Tenants: map[string]*config.OTLPTenantAttributeConfig{
+ "application": {
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "custom.application.label",
+ "custom.stream.label",
+ "k8s.namespace.name",
+ "kubernetes.namespace_name",
+ "log_source",
+ "log_type",
+ "openshift.cluster.uid",
+ "openshift.log.source",
+ "openshift.log.type",
+ },
+ },
+ },
+ LogAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"custom.log.metadata"},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "openshift-logging defaults with de-duplication",
+ spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "custom.stream.label",
+ },
+ {
+ Name: "k8s.namespace.name",
+ },
+ },
+ },
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ LogAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "custom.log.metadata",
+ },
+ },
+ },
+ },
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ Openshift: &lokiv1.OpenshiftTenantSpec{
+ OTLP: &lokiv1.OpenshiftOTLPConfig{
+ DisableRecommendedAttributes: true,
+ },
+ },
+ },
+ },
+ wantConfig: config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "custom.stream.label",
+ "k8s.namespace.name",
+ "kubernetes.namespace_name",
+ "log_source",
+ "log_type",
+ "openshift.cluster.uid",
+ "openshift.log.source",
+ "openshift.log.type",
+ },
+ },
+ },
+ LogAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"custom.log.metadata"},
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ cfg := otlpAttributeConfig(&tc.spec)
+
+ assert.Equal(t, tc.wantConfig, cfg)
+ })
+ }
+}
+
+func TestSortOTLPAttributes(t *testing.T) {
+ tt := []struct {
+ desc string
+ attrs []config.OTLPAttribute
+ wantAttrs []config.OTLPAttribute
+ }{
+ {
+ desc: "sort",
+ attrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"test.a"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.c",
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.b"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.a",
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "test.regex.b",
+ },
+ },
+ wantAttrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.b"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "test.regex.b",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"test.a"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.a",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.c",
+ },
+ },
+ },
+ {
+ desc: "simple combine",
+ attrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.a"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.b"},
+ },
+ },
+ wantAttrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.a", "test.b"},
+ },
+ },
+ },
+ {
+ desc: "complex combine",
+ attrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.a"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.c"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"test.d", "test.e"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.b"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"test.f"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "test.regex.b",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.a",
+ },
+ },
+ wantAttrs: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{"test.a", "test.b", "test.c"},
+ },
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Regex: "test.regex.b",
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{"test.d", "test.e", "test.f"},
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: "test.regex.a",
+ },
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ attrs := sortAndDeduplicateOTLPAttributes(tc.attrs)
+
+ assert.Equal(t, tc.wantAttrs, attrs)
+ })
+ }
+}
diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go
index faebb2f73b3a0..ec55aa21bc60b 100644
--- a/operator/internal/manifests/config_test.go
+++ b/operator/internal/manifests/config_test.go
@@ -12,7 +12,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go
index a3b9e6abc932d..ec10e34472386 100644
--- a/operator/internal/manifests/distributor_test.go
+++ b/operator/internal/manifests/distributor_test.go
@@ -10,7 +10,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go
index 467f0a1fe5c25..1a4610a94114f 100644
--- a/operator/internal/manifests/gateway_tenants.go
+++ b/operator/internal/manifests/gateway_tenants.go
@@ -12,8 +12,8 @@ import (
networkingv1 "k8s.io/api/networking/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index 8399eaaf352ec..051b6a653aab8 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -12,8 +12,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index b7e7b3270824d..02ad588974858 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -14,8 +14,8 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/gateway"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/manifests/storage"
diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go
index 93ab7a033e147..35f46ae88777c 100644
--- a/operator/internal/manifests/indexgateway_test.go
+++ b/operator/internal/manifests/indexgateway_test.go
@@ -9,7 +9,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/ingester_test.go b/operator/internal/manifests/ingester_test.go
index 83b0d94111301..c491b580ad7cf 100644
--- a/operator/internal/manifests/ingester_test.go
+++ b/operator/internal/manifests/ingester_test.go
@@ -10,8 +10,8 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- v1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ v1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 016e42253f21b..1bf465b5e08c0 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -8,8 +8,8 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/utils/ptr"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
@@ -96,6 +96,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: true
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -258,6 +260,7 @@ overrides:
},
Shippers: []string{"boltdb"},
EnableRemoteReporting: true,
+ DiscoverLogLevels: true,
HTTPTimeouts: HTTPTimeoutConfig{
IdleTimeout: 30 * time.Second,
ReadTimeout: 30 * time.Second,
@@ -353,6 +356,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -779,6 +784,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1137,6 +1144,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1496,6 +1505,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -1889,6 +1900,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2224,6 +2237,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2663,6 +2678,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -2987,6 +3004,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -3484,6 +3503,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -3745,6 +3766,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4007,6 +4030,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4270,6 +4295,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4569,6 +4596,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -4866,6 +4895,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5364,6 +5395,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5540,6 +5573,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -5709,6 +5744,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -6043,9 +6080,7 @@ compactor:
working_directory: /tmp/loki/compactor
distributor:
otlp_config:
- default_resource_attributes_as_index_labels:
- - foo.bar
- - bar.baz
+ default_resource_attributes_as_index_labels: []
frontend:
tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
compress_responses: true
@@ -6101,6 +6136,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: 256000
max_entries_limit_per_query: 5000
+ discover_service_name: []
+ discover_log_levels: false
max_global_streams_per_user: 0
max_chunks_per_query: 2000000
max_query_length: 721h
@@ -6122,37 +6159,24 @@ limits_config:
allow_structured_metadata: true
otlp_config:
resource_attributes:
- ignore_defaults: true
attributes_config:
- action: index_label
attributes:
- res.foo.bar
- res.bar.baz
- regex: .*
- action: structured_metadata
attributes:
- res.service.env
- regex: .*
scope_attributes:
- - action: index_label
+ - action: structured_metadata
attributes:
- scope.foo.bar
- scope.bar.baz
- regex: .*
- - action: structured_metadata
- attributes:
- - scope.service.env
- regex: .*
log_attributes:
- - action: index_label
+ - action: structured_metadata
attributes:
- log.foo.bar
- log.bar.baz
- regex: .*
- - action: structured_metadata
- attributes:
- - log.service.env
- regex: .*
memberlist:
abort_if_cluster_join_fails: true
advertise_port: 7946
@@ -6284,37 +6308,24 @@ overrides:
test-a:
otlp_config:
resource_attributes:
- ignore_defaults: true
attributes_config:
- action: index_label
attributes:
- res.foo.bar
- res.bar.baz
- regex: .*
- action: structured_metadata
attributes:
- res.service.env
- regex: .*
scope_attributes:
- - action: index_label
+ - action: structured_metadata
attributes:
- scope.foo.bar
- scope.bar.baz
- regex: .*
- - action: structured_metadata
- attributes:
- - scope.service.env
- regex: .*
log_attributes:
- - action: index_label
+ - action: structured_metadata
attributes:
- log.foo.bar
- log.bar.baz
- regex: .*
- - action: structured_metadata
- attributes:
- - log.service.env
- regex: .*
`
opts := Options{
Stack: lokiv1.LokiStackSpec{
@@ -6335,68 +6346,6 @@ overrides:
PerStreamRateLimitBurst: 15,
PerStreamDesiredRate: 3,
},
- OTLP: &lokiv1.GlobalOTLPSpec{
- IndexedResourceAttributes: []string{
- "foo.bar",
- "bar.baz",
- },
- OTLPSpec: lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- Attributes: []lokiv1.OTLPResourceAttributesConfigSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "res.foo.bar",
- "res.bar.baz",
- },
- Regex: ".*",
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "res.service.env",
- },
- Regex: ".*",
- },
- },
- },
- ScopeAttributes: []lokiv1.OTLPAttributesSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "scope.foo.bar",
- "scope.bar.baz",
- },
- Regex: ".*",
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "scope.service.env",
- },
- Regex: ".*",
- },
- },
- LogAttributes: []lokiv1.OTLPAttributesSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "log.foo.bar",
- "log.bar.baz",
- },
- Regex: ".*",
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "log.service.env",
- },
- Regex: ".*",
- },
- },
- },
- },
QueryLimits: &lokiv1.QueryLimitSpec{
MaxEntriesLimitPerQuery: 5000,
MaxChunksPerQuery: 2000000,
@@ -6532,58 +6481,81 @@ overrides:
"test-a": {
Limits: lokiv1.PerTenantLimitsTemplateSpec{
OTLP: &lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- Attributes: []lokiv1.OTLPResourceAttributesConfigSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "res.foo.bar",
- "res.bar.baz",
- },
- Regex: ".*",
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "res.service.env",
- },
- Regex: ".*",
- },
- },
+ // This part of the spec is not actually used in this step.
+ // It has already been pre-processed into the OTLPAttributes below.
+ },
+ },
+ },
+ },
+ OTLPAttributes: OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &OTLPTenantAttributeConfig{
+ ResourceAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "res.foo.bar",
+ "res.bar.baz",
},
- ScopeAttributes: []lokiv1.OTLPAttributesSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "scope.foo.bar",
- "scope.bar.baz",
- },
- Regex: ".*",
+ },
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "res.service.env",
+ },
+ },
+ },
+ ScopeAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "scope.foo.bar",
+ "scope.bar.baz",
+ },
+ },
+ },
+ LogAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "log.foo.bar",
+ "log.bar.baz",
+ },
+ },
+ },
+ },
+ Tenants: map[string]*OTLPTenantAttributeConfig{
+ "test-a": {
+ ResourceAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "res.foo.bar",
+ "res.bar.baz",
},
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "scope.service.env",
- },
- Regex: ".*",
+ },
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "res.service.env",
},
},
- LogAttributes: []lokiv1.OTLPAttributesSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- Attributes: []string{
- "log.foo.bar",
- "log.bar.baz",
- },
- Regex: ".*",
+ },
+ ScopeAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "scope.foo.bar",
+ "scope.bar.baz",
},
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{
- "log.service.env",
- },
- Regex: ".*",
+ },
+ },
+ LogAttributes: []OTLPAttribute{
+ {
+ Action: OTLPAttributeActionMetadata,
+ Names: []string{
+ "log.foo.bar",
+ "log.bar.baz",
},
},
},
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 01ee6fe9b7075..dbbbd8bb127c2 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -107,16 +107,11 @@ compactor:
{{- end }}
delete_request_store: {{.ObjectStorage.SharedStore}}
{{- end }}
-{{- if $l := .Stack.Limits.Global }}
-{{- with $l.OTLP }}{{- with .IndexedResourceAttributes }}
+{{- if .OTLPAttributes.RemoveDefaultLabels }}
distributor:
otlp_config:
- default_resource_attributes_as_index_labels:
- {{- range . }}
- - {{ . }}
- {{- end }}
+ default_resource_attributes_as_index_labels: []
{{- end }}
-{{- end }}{{- end }}
frontend:
tail_proxy_url: {{ .Querier.Protocol }}://{{ .Querier.FQDN }}:{{ .Querier.Port }}
{{- if .Gates.HTTPEncryption }}
@@ -202,6 +197,8 @@ limits_config:
max_streams_per_user: 0
max_line_size: {{ .Stack.Limits.Global.IngestionLimits.MaxLineSize }}
max_entries_limit_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxEntriesLimitPerQuery }}
+ discover_service_name: []
+ discover_log_levels: {{ .DiscoverLogLevels }}
max_global_streams_per_user: {{ .Stack.Limits.Global.IngestionLimits.MaxGlobalStreamsPerTenant }}
max_chunks_per_query: {{ .Stack.Limits.Global.QueryLimits.MaxChunksPerQuery }}
max_query_length: 721h
@@ -233,19 +230,18 @@ limits_config:
enabled: true
desired_rate: {{ . }}MB
{{- end }}
-{{- with .Stack.Limits.Global.OTLP }}
+{{- with .OTLPAttributes.Global }}
otlp_config:
- {{- with .ResourceAttributes }}
+ {{- if .ResourceAttributes }}
resource_attributes:
- ignore_defaults: {{ .IgnoreDefaults }}
- {{- with .Attributes}}
+ {{- with .ResourceAttributes}}
attributes_config:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
@@ -257,11 +253,11 @@ limits_config:
{{- with .ScopeAttributes }}
scope_attributes:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
@@ -272,11 +268,11 @@ limits_config:
{{- with .LogAttributes }}
log_attributes:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
index 21b75508dc7aa..0eb458660a67d 100644
--- a/operator/internal/manifests/internal/config/loki-runtime-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
@@ -1,3 +1,4 @@
+{{- /*gotype: github.com/grafana/loki/operator/internal/manifests/internal/config.Options*/ -}}
---
overrides:
{{- $opts := . }}
@@ -73,19 +74,18 @@ overrides:
{{- end }}
{{- end}}
{{- end -}}
- {{- if $l := $spec.OTLP }}
+ {{- if $otlp := index $opts.OTLPAttributes.Tenants $tenant }}
otlp_config:
- {{- with $l.ResourceAttributes }}
+ {{- if $otlp.ResourceAttributes }}
resource_attributes:
- ignore_defaults: {{ .IgnoreDefaults }}
- {{- with .Attributes}}
+ {{- with $otlp.ResourceAttributes}}
attributes_config:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
@@ -94,14 +94,14 @@ overrides:
{{- end }}
{{- end }}
{{- end}}
- {{- with $l.ScopeAttributes }}
+ {{- with $otlp.ScopeAttributes }}
scope_attributes:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
@@ -109,14 +109,14 @@ overrides:
{{- end }}
{{- end }}
{{- end }}
- {{- with $l.LogAttributes }}
+ {{- with $otlp.LogAttributes }}
log_attributes:
{{- range . }}
- - action: {{ .Action.Value }}
+ - action: {{ .Action }}
{{- with .Regex }}
regex: {{ . }}
{{- end }}
- {{- with .Attributes }}
+ {{- with .Names }}
attributes:
{{- range . }}
- {{ . }}
diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go
index e19d4c34f3262..365a768724eba 100644
--- a/operator/internal/manifests/internal/config/options.go
+++ b/operator/internal/manifests/internal/config/options.go
@@ -6,8 +6,8 @@ import (
"strings"
"time"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
@@ -29,6 +29,7 @@ type Options struct {
MaxConcurrent MaxConcurrent
WriteAheadLog WriteAheadLog
EnableRemoteReporting bool
+ DiscoverLogLevels bool
Shippers []string
ObjectStorage storage.Options
@@ -37,6 +38,8 @@ type Options struct {
Retention RetentionOptions
+ OTLPAttributes OTLPAttributeConfig
+
Overrides map[string]LokiOverrides
}
@@ -246,6 +249,34 @@ type RetentionOptions struct {
DeleteWorkerCount uint
}
+// OTLPAttributeConfig contains the rendered OTLP label configuration.
+// It is influenced both by the tenancy mode and by the custom OTLP configuration on the LokiStack, and it may
+// contain more labels than the user has configured if some labels are deemed "required".
+type OTLPAttributeConfig struct {
+ RemoveDefaultLabels bool
+ Global *OTLPTenantAttributeConfig
+ Tenants map[string]*OTLPTenantAttributeConfig
+}
+
+type OTLPTenantAttributeConfig struct {
+ ResourceAttributes []OTLPAttribute
+ ScopeAttributes []OTLPAttribute
+ LogAttributes []OTLPAttribute
+}
+
+type OTLPAttributeAction string
+
+const (
+ OTLPAttributeActionStreamLabel OTLPAttributeAction = "index_label"
+ OTLPAttributeActionMetadata OTLPAttributeAction = "structured_metadata"
+)
+
+type OTLPAttribute struct {
+ Action OTLPAttributeAction
+ Names []string
+ Regex string
+}
+
type TLSOptions struct {
Ciphers []string
MinTLSVersion string
diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go
index 41de2a9282fd2..6b2b00c8ae339 100644
--- a/operator/internal/manifests/internal/gateway/build.go
+++ b/operator/internal/manifests/internal/gateway/build.go
@@ -8,7 +8,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/manifests/internal/gateway/build_test.go b/operator/internal/manifests/internal/gateway/build_test.go
index 77d7fc1873a24..2b089f464ed08 100644
--- a/operator/internal/manifests/internal/gateway/build_test.go
+++ b/operator/internal/manifests/internal/gateway/build_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/internal/gateway/options.go b/operator/internal/manifests/internal/gateway/options.go
index eefefedcfbca6..9854f01071f7b 100644
--- a/operator/internal/manifests/internal/gateway/options.go
+++ b/operator/internal/manifests/internal/gateway/options.go
@@ -1,7 +1,7 @@
package gateway
import (
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/internal/rules/marshal.go b/operator/internal/manifests/internal/rules/marshal.go
index 192f7d8ba7ef4..288faf3b35e29 100644
--- a/operator/internal/manifests/internal/rules/marshal.go
+++ b/operator/internal/manifests/internal/rules/marshal.go
@@ -4,7 +4,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
"gopkg.in/yaml.v2"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const tenantLabel = "tenantId"
diff --git a/operator/internal/manifests/internal/rules/marshal_test.go b/operator/internal/manifests/internal/rules/marshal_test.go
index 1620f050a7652..f9f14580470a5 100644
--- a/operator/internal/manifests/internal/rules/marshal_test.go
+++ b/operator/internal/manifests/internal/rules/marshal_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/rules"
)
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index 4962e4b3e762c..28ffe4fb3c319 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -4,7 +4,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// ComponentResources is a map of component->requests/limits
@@ -48,6 +48,64 @@ var ResourceRequirementsTable = map[lokiv1.LokiStackSizeType]ComponentResources{
PVCSize: resource.MustParse("10Gi"),
},
},
+ lokiv1.SizeOneXPico: {
+ Querier: corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("750m"),
+ corev1.ResourceMemory: resource.MustParse("1.5Gi"),
+ },
+ },
+ Ruler: ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("1Gi"),
+ },
+ PVCSize: resource.MustParse("10Gi"),
+ },
+ Ingester: ResourceRequirements{
+ PVCSize: resource.MustParse("10Gi"),
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("3Gi"),
+ },
+ PDBMinAvailable: 1,
+ },
+ Distributor: corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("500Mi"),
+ },
+ },
+ QueryFrontend: corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("500Mi"),
+ },
+ },
+ Compactor: ResourceRequirements{
+ PVCSize: resource.MustParse("10Gi"),
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("500Mi"),
+ },
+ },
+ Gateway: corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("500Mi"),
+ },
+ },
+ IndexGateway: ResourceRequirements{
+ PVCSize: resource.MustParse("50Gi"),
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("150m"),
+ corev1.ResourceMemory: resource.MustParse("250Mi"),
+ },
+ },
+ WALStorage: ResourceRequirements{
+ PVCSize: resource.MustParse("150Gi"),
+ },
+ },
lokiv1.SizeOneXExtraSmall: {
Querier: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
@@ -283,6 +341,66 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
},
},
},
+
+ lokiv1.SizeOneXPico: {
+ Size: lokiv1.SizeOneXPico,
+ Replication: &lokiv1.ReplicationSpec{
+ Factor: 2,
+ },
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
+ // Defaults from Loki docs
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxLineSize: 256000,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
+ PerStreamRateLimitBurst: 15,
+ },
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ // Defaults from Loki docs
+ MaxEntriesLimitPerQuery: 5000,
+ MaxChunksPerQuery: 2000000,
+ MaxQuerySeries: 500,
+ QueryTimeout: "3m",
+ CardinalityLimit: 100000,
+ MaxVolumeSeries: 1000,
+ },
+ },
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: 3,
+ },
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: 2,
+ },
+ },
+ },
+
lokiv1.SizeOneXExtraSmall: {
Size: lokiv1.SizeOneXExtraSmall,
Replication: &lokiv1.ReplicationSpec{
@@ -292,15 +410,16 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
Global: &lokiv1.LimitsTemplateSpec{
IngestionLimits: &lokiv1.IngestionLimitSpec{
// Defaults from Loki docs
- IngestionRate: 4,
- IngestionBurstSize: 6,
- MaxLabelNameLength: 1024,
- MaxLabelValueLength: 2048,
- MaxLabelNamesPerSeries: 30,
- MaxLineSize: 256000,
- PerStreamDesiredRate: 3,
- PerStreamRateLimit: 5,
- PerStreamRateLimitBurst: 15,
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxGlobalStreamsPerTenant: 10000,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxLineSize: 256000,
+ PerStreamDesiredRate: 3,
+ PerStreamRateLimit: 5,
+ PerStreamRateLimitBurst: 15,
},
QueryLimits: &lokiv1.QueryLimitSpec{
// Defaults from Loki docs
diff --git a/operator/internal/manifests/memberlist.go b/operator/internal/manifests/memberlist.go
index dd279b3b48e47..451ce39618edd 100644
--- a/operator/internal/manifests/memberlist.go
+++ b/operator/internal/manifests/memberlist.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// BuildLokiGossipRingService creates a k8s service for the gossip/memberlist members of the cluster
diff --git a/operator/internal/manifests/memberlist_test.go b/operator/internal/manifests/memberlist_test.go
index f7d2b0686f518..e9bfbded964fa 100644
--- a/operator/internal/manifests/memberlist_test.go
+++ b/operator/internal/manifests/memberlist_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestConfigureHashRingEnv_UseDefaults_NoHashRingSpec(t *testing.T) {
diff --git a/operator/internal/manifests/node_placement.go b/operator/internal/manifests/node_placement.go
index 7a74cc877225b..f965a093458b2 100644
--- a/operator/internal/manifests/node_placement.go
+++ b/operator/internal/manifests/node_placement.go
@@ -8,7 +8,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/manifests/node_placement_test.go b/operator/internal/manifests/node_placement_test.go
index 79d4c53ba316c..a400ad50d0bfc 100644
--- a/operator/internal/manifests/node_placement_test.go
+++ b/operator/internal/manifests/node_placement_test.go
@@ -8,8 +8,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/openshift/alertingrule.go b/operator/internal/manifests/openshift/alertingrule.go
index e4869e9f7ca5c..5d03db6d0f924 100644
--- a/operator/internal/manifests/openshift/alertingrule.go
+++ b/operator/internal/manifests/openshift/alertingrule.go
@@ -1,6 +1,6 @@
package openshift
-import lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+import lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
func AlertingRuleTenantLabels(ar *lokiv1.AlertingRule) {
switch ar.Spec.TenantID {
diff --git a/operator/internal/manifests/openshift/alertingrule_test.go b/operator/internal/manifests/openshift/alertingrule_test.go
index 89519af3cb14f..afd50dc3d2d73 100644
--- a/operator/internal/manifests/openshift/alertingrule_test.go
+++ b/operator/internal/manifests/openshift/alertingrule_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestAlertingRuleTenantLabels(t *testing.T) {
diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go
index 9cf050ed92fab..5ce0cba1473c2 100644
--- a/operator/internal/manifests/openshift/build_test.go
+++ b/operator/internal/manifests/openshift/build_test.go
@@ -9,7 +9,7 @@ import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestBuildGatewayTenantModeObjects_ClusterRoleRefMatches(t *testing.T) {
diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go
index cac8dc77c64f4..75ecc65973bf5 100644
--- a/operator/internal/manifests/openshift/configure.go
+++ b/operator/internal/manifests/openshift/configure.go
@@ -10,7 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
)
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
index ec1656e27bd7d..5e5921a1150a1 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
@@ -1746,351 +1746,6 @@
"showTitle": true,
"title": "TSBD Index",
"titleSize": "h6"
- },
- {
- "collapse": false,
- "height": "250px",
- "panels": [
- {
- "aliasColors": {
- "1xx": "#EAB839",
- "2xx": "#7EB26D",
- "3xx": "#6ED0E0",
- "4xx": "#EF843C",
- "5xx": "#E24D42",
- "error": "#E24D42",
- "success": "#7EB26D"
- },
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 100,
- "lineWidth": 0,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- }
- },
- "min": 0,
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "short"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "1xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#EAB839",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "2xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "3xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#6ED0E0",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "4xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#EF843C",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "5xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#E24D42",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "OK"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "cancel"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#A9A9A9",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "error"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#E24D42",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "success"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
- },
- "fill": 10,
- "id": 22,
- "linewidth": 0,
- "links": [ ],
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "stack": true,
- "targets": [
- {
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
- }
- ],
- "title": "QPS",
- "type": "graph"
- },
- {
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 10,
- "lineWidth": 1,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- }
- },
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "ms"
- },
- "overrides": [ ]
- },
- "id": 23,
- "links": [ ],
- "nullPointMode": "null as zero",
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
- },
- {
- "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
- },
- {
- "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval]))",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "Average",
- "refId": "C",
- "step": 10
- }
- ],
- "title": "Latency",
- "type": "graph",
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 10,
- "lineWidth": 1,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- }
- },
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "ms"
- },
- "overrides": [ ]
- },
- "id": 24,
- "links": [ ],
- "nullPointMode": "null as zero",
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-index-gateway-http\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le,pod)) * 1e3",
- "format": "time_series",
- "interval": "1m",
- "intervalFactor": 2,
- "legendFormat": "{{pod}}",
- "refId": "A",
- "step": 10
- }
- ],
- "title": "Per Pod Latency (p99)",
- "type": "graph"
- }
- ],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": true,
- "title": "BoltDB Index",
- "titleSize": "h6"
}
],
"schemaVersion": 14,
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
index 50f5f492e8896..3b28d54ee2ffe 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
@@ -1185,351 +1185,6 @@
"showTitle": true,
"title": "Index",
"titleSize": "h6"
- },
- {
- "collapse": false,
- "height": "250px",
- "panels": [
- {
- "aliasColors": {
- "1xx": "#EAB839",
- "2xx": "#7EB26D",
- "3xx": "#6ED0E0",
- "4xx": "#EF843C",
- "5xx": "#E24D42",
- "error": "#E24D42",
- "success": "#7EB26D"
- },
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 100,
- "lineWidth": 0,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- }
- },
- "min": 0,
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "short"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "1xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#EAB839",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "2xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "3xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#6ED0E0",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "4xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#EF843C",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "5xx"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#E24D42",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "OK"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "cancel"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#A9A9A9",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "error"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#E24D42",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "success"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "#7EB26D",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
- },
- "fill": 10,
- "id": 15,
- "linewidth": 0,
- "links": [ ],
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "stack": true,
- "targets": [
- {
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
- }
- ],
- "title": "QPS",
- "type": "graph"
- },
- {
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 10,
- "lineWidth": 1,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- }
- },
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "ms"
- },
- "overrides": [ ]
- },
- "id": 16,
- "links": [ ],
- "nullPointMode": "null as zero",
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
- },
- {
- "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
- },
- {
- "expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval]))",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "Average",
- "refId": "C",
- "step": 10
- }
- ],
- "title": "Latency",
- "type": "graph",
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {
- "drawStyle": "line",
- "fillOpacity": 10,
- "lineWidth": 1,
- "pointSize": 5,
- "showPoints": "never",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- }
- },
- "thresholds": {
- "mode": "absolute",
- "steps": [ ]
- },
- "unit": "ms"
- },
- "overrides": [ ]
- },
- "id": 17,
- "links": [ ],
- "nullPointMode": "null as zero",
- "options": {
- "legend": {
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "span": 4,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{namespace=\"$namespace\",job=~\".+-ingester-http\", operation=\"WRITE\"}[$__rate_interval])) by (le,pod)) * 1e3",
- "format": "time_series",
- "interval": "1m",
- "intervalFactor": 2,
- "legendFormat": "__auto",
- "refId": "A",
- "step": 10
- }
- ],
- "title": "Per Pod Latency (p99)",
- "type": "graph"
- }
- ],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": true,
- "title": "BoltDB Index",
- "titleSize": "h6"
}
],
"schemaVersion": 14,
diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go
index ccf5eac09b7a7..bc804c11b11ed 100644
--- a/operator/internal/manifests/openshift/opa_openshift.go
+++ b/operator/internal/manifests/openshift/opa_openshift.go
@@ -9,7 +9,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/manifests/openshift/options.go b/operator/internal/manifests/openshift/options.go
index 7cba773841b82..2d6276446dfb8 100644
--- a/operator/internal/manifests/openshift/options.go
+++ b/operator/internal/manifests/openshift/options.go
@@ -5,7 +5,7 @@ import (
"math/rand"
"time"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/config"
)
diff --git a/operator/internal/manifests/openshift/otlp.go b/operator/internal/manifests/openshift/otlp.go
new file mode 100644
index 0000000000000..0261faf0ed79e
--- /dev/null
+++ b/operator/internal/manifests/openshift/otlp.go
@@ -0,0 +1,107 @@
+package openshift
+
+import (
+ "slices"
+
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
+)
+
+// DefaultOTLPAttributes provides the required/recommended set of OTLP attributes for OpenShift Logging.
+func DefaultOTLPAttributes(disableRecommended bool) config.OTLPAttributeConfig {
+ result := config.OTLPAttributeConfig{
+ RemoveDefaultLabels: true,
+ Global: &config.OTLPTenantAttributeConfig{
+ ResourceAttributes: []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionStreamLabel,
+ Names: []string{
+ "k8s.namespace.name",
+ "kubernetes.namespace_name",
+ "log_source",
+ "log_type",
+ "openshift.cluster.uid",
+ "openshift.log.source",
+ "openshift.log.type",
+ },
+ },
+ },
+ },
+ }
+
+ if disableRecommended {
+ return result
+ }
+
+ result.Global.ResourceAttributes[0].Names = append(result.Global.ResourceAttributes[0].Names,
+ "k8s.container.name",
+ "k8s.cronjob.name",
+ "k8s.daemonset.name",
+ "k8s.deployment.name",
+ "k8s.job.name",
+ "k8s.node.name",
+ "k8s.pod.name",
+ "k8s.statefulset.name",
+ "kubernetes.container_name",
+ "kubernetes.host",
+ "kubernetes.pod_name",
+ "service.name",
+ )
+ slices.Sort(result.Global.ResourceAttributes[0].Names)
+
+ result.Global.ResourceAttributes = append(result.Global.ResourceAttributes,
+ config.OTLPAttribute{
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{
+ "k8s.node.uid",
+ "k8s.pod.uid",
+ "k8s.replicaset.name",
+ "process.command_line",
+ "process.executable.name",
+ "process.executable.path",
+ "process.pid",
+ },
+ },
+ config.OTLPAttribute{
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `k8s\.pod\.labels\..+`,
+ },
+ config.OTLPAttribute{
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `openshift\.labels\..+`,
+ },
+ )
+
+ result.Global.LogAttributes = []config.OTLPAttribute{
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Names: []string{
+ "k8s.event.level",
+ "k8s.event.object_ref.api.group",
+ "k8s.event.object_ref.api.version",
+ "k8s.event.object_ref.name",
+ "k8s.event.object_ref.resource",
+ "k8s.event.request.uri",
+ "k8s.event.response.code",
+ "k8s.event.stage",
+ "k8s.event.user_agent",
+ "k8s.user.groups",
+ "k8s.user.username",
+ "log.iostream",
+ },
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `k8s\.event\.annotations\..+`,
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `systemd\.t\..+`,
+ },
+ {
+ Action: config.OTLPAttributeActionMetadata,
+ Regex: `systemd\.u\..+`,
+ },
+ }
+
+ return result
+}
diff --git a/operator/internal/manifests/openshift/recordingrule.go b/operator/internal/manifests/openshift/recordingrule.go
index 97be1bb4a17ec..8e8a0eccfa6de 100644
--- a/operator/internal/manifests/openshift/recordingrule.go
+++ b/operator/internal/manifests/openshift/recordingrule.go
@@ -1,6 +1,6 @@
package openshift
-import lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+import lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
func RecordingRuleTenantLabels(r *lokiv1.RecordingRule) {
switch r.Spec.TenantID {
diff --git a/operator/internal/manifests/openshift/recordingrule_test.go b/operator/internal/manifests/openshift/recordingrule_test.go
index 16e95b2310a65..901913dac2944 100644
--- a/operator/internal/manifests/openshift/recordingrule_test.go
+++ b/operator/internal/manifests/openshift/recordingrule_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestRecordingRuleTenantLabels(t *testing.T) {
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index b294b697c3721..c3e627b047003 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -4,8 +4,8 @@ import (
"strings"
"time"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
diff --git a/operator/internal/manifests/options_test.go b/operator/internal/manifests/options_test.go
index d6fe7c5c19d4a..06cf1cb41ec2c 100644
--- a/operator/internal/manifests/options_test.go
+++ b/operator/internal/manifests/options_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
)
diff --git a/operator/internal/manifests/proxy_env.go b/operator/internal/manifests/proxy_env.go
index e0bc1841ace49..21e564d8a0d1c 100644
--- a/operator/internal/manifests/proxy_env.go
+++ b/operator/internal/manifests/proxy_env.go
@@ -6,7 +6,7 @@ import (
"github.com/imdario/mergo"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/manifests/proxy_env_test.go b/operator/internal/manifests/proxy_env_test.go
index 9a780dfe48be4..a12e0d7bccdb0 100644
--- a/operator/internal/manifests/proxy_env_test.go
+++ b/operator/internal/manifests/proxy_env_test.go
@@ -8,7 +8,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestContainerEnvVars_ReadVarsFromCustomResource(t *testing.T) {
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index ff3e0ccd45630..565e4f82a4803 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -10,7 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index d11fb968ce3ac..a29ee5e4977eb 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -9,7 +9,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go
index bcf0c18e6e739..76e451c6f266c 100644
--- a/operator/internal/manifests/ruler.go
+++ b/operator/internal/manifests/ruler.go
@@ -14,7 +14,7 @@ import (
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/manifests/storage"
diff --git a/operator/internal/manifests/ruler_test.go b/operator/internal/manifests/ruler_test.go
index b753dc090de8c..866a40b6c10e8 100644
--- a/operator/internal/manifests/ruler_test.go
+++ b/operator/internal/manifests/ruler_test.go
@@ -10,7 +10,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/rules_config.go b/operator/internal/manifests/rules_config.go
index 1640aedc4f10c..5cc4a3772ae3e 100644
--- a/operator/internal/manifests/rules_config.go
+++ b/operator/internal/manifests/rules_config.go
@@ -6,7 +6,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/rules"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
diff --git a/operator/internal/manifests/rules_config_test.go b/operator/internal/manifests/rules_config_test.go
index 338fab231644e..c164c60cd5b4c 100644
--- a/operator/internal/manifests/rules_config_test.go
+++ b/operator/internal/manifests/rules_config_test.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestRulesConfigMap_ReturnsDataEntriesPerRule(t *testing.T) {
diff --git a/operator/internal/manifests/service_monitor_test.go b/operator/internal/manifests/service_monitor_test.go
index cf829cca07aff..4cf1979b51993 100644
--- a/operator/internal/manifests/service_monitor_test.go
+++ b/operator/internal/manifests/service_monitor_test.go
@@ -10,8 +10,8 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// Test that all serviceMonitor match the labels of their services so that we know all serviceMonitor
diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go
index af3c79bd20add..87dc987d2d802 100644
--- a/operator/internal/manifests/service_test.go
+++ b/operator/internal/manifests/service_test.go
@@ -11,8 +11,8 @@ import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ configv1 "github.com/grafana/loki/operator/api/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// Test that the service ports have matching deployment/statefulset/daemonset ports on the podspec.
diff --git a/operator/internal/manifests/serviceaccount_test.go b/operator/internal/manifests/serviceaccount_test.go
index dc08b62f700c0..1a6046280fcf9 100644
--- a/operator/internal/manifests/serviceaccount_test.go
+++ b/operator/internal/manifests/serviceaccount_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/assert"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
diff --git a/operator/internal/manifests/storage/configure.go b/operator/internal/manifests/storage/configure.go
index b1c8a0df955a9..ce6fa78273349 100644
--- a/operator/internal/manifests/storage/configure.go
+++ b/operator/internal/manifests/storage/configure.go
@@ -10,7 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
var (
diff --git a/operator/internal/manifests/storage/configure_test.go b/operator/internal/manifests/storage/configure_test.go
index 804ca1d52bb02..3080f924c11cf 100644
--- a/operator/internal/manifests/storage/configure_test.go
+++ b/operator/internal/manifests/storage/configure_test.go
@@ -8,7 +8,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestConfigureDeploymentForStorageType(t *testing.T) {
diff --git a/operator/internal/manifests/storage/options.go b/operator/internal/manifests/storage/options.go
index 1adb80235b097..59618953d1f92 100644
--- a/operator/internal/manifests/storage/options.go
+++ b/operator/internal/manifests/storage/options.go
@@ -1,7 +1,7 @@
package storage
import (
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// Options is used to configure Loki to integrate with
diff --git a/operator/internal/manifests/storage/schema.go b/operator/internal/manifests/storage/schema.go
index a36f4c46c6598..c9cf737a4a2ae 100644
--- a/operator/internal/manifests/storage/schema.go
+++ b/operator/internal/manifests/storage/schema.go
@@ -6,7 +6,7 @@ import (
"github.com/ViaQ/logerr/v2/kverrors"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/validation"
)
diff --git a/operator/internal/manifests/storage/schema_test.go b/operator/internal/manifests/storage/schema_test.go
index c3ca914658f9e..7490ae5aeb002 100644
--- a/operator/internal/manifests/storage/schema_test.go
+++ b/operator/internal/manifests/storage/schema_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestBuildSchemaConfig_NoSchemas(t *testing.T) {
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index e47ff92c6a76d..9e501ee72ae99 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/openshift"
)
@@ -60,7 +60,7 @@ const (
EnvRelatedImageGateway = "RELATED_IMAGE_GATEWAY"
// DefaultContainerImage declares the default fallback for loki image.
- DefaultContainerImage = "docker.io/grafana/loki:3.2.0"
+ DefaultContainerImage = "docker.io/grafana/loki:3.2.1"
// DefaultLokiStackGatewayImage declares the default image for lokiStack-gateway.
DefaultLokiStackGatewayImage = "quay.io/observatorium/api:latest"
diff --git a/operator/internal/metrics/lokistack.go b/operator/internal/metrics/lokistack.go
index 7bdc966024956..c321562ecec86 100644
--- a/operator/internal/metrics/lokistack.go
+++ b/operator/internal/metrics/lokistack.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/metrics/lokistack_test.go b/operator/internal/metrics/lokistack_test.go
index 061b349c4e0e6..ba12439fa6edc 100644
--- a/operator/internal/metrics/lokistack_test.go
+++ b/operator/internal/metrics/lokistack_test.go
@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go
index 3e59f8a198556..78f26572831ce 100644
--- a/operator/internal/status/components.go
+++ b/operator/internal/status/components.go
@@ -7,7 +7,7 @@ import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go
index 0132ac772a358..2606d704c4188 100644
--- a/operator/internal/status/components_test.go
+++ b/operator/internal/status/components_test.go
@@ -10,7 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/status/conditions_test.go b/operator/internal/status/conditions_test.go
index e011b8406460a..5cb16eb2162b9 100644
--- a/operator/internal/status/conditions_test.go
+++ b/operator/internal/status/conditions_test.go
@@ -7,7 +7,7 @@ import (
"github.com/google/go-cmp/cmp"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestMergeConditions(t *testing.T) {
diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go
index 17d81ab01d3aa..d32036a37f2bc 100644
--- a/operator/internal/status/lokistack.go
+++ b/operator/internal/status/lokistack.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/status/lokistack_test.go b/operator/internal/status/lokistack_test.go
index d3ae5e4839436..1ec64dc20c898 100644
--- a/operator/internal/status/lokistack_test.go
+++ b/operator/internal/status/lokistack_test.go
@@ -13,7 +13,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
)
diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go
index c544695d3d2ea..ca86e02d6434d 100644
--- a/operator/internal/status/status.go
+++ b/operator/internal/status/status.go
@@ -10,7 +10,7 @@ import (
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/status/status_test.go b/operator/internal/status/status_test.go
index 58978f49f7745..9028fc95b99a7 100644
--- a/operator/internal/status/status_test.go
+++ b/operator/internal/status/status_test.go
@@ -12,7 +12,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
)
diff --git a/operator/internal/status/storage.go b/operator/internal/status/storage.go
index 620b5d4eeb364..066fe113fa520 100644
--- a/operator/internal/status/storage.go
+++ b/operator/internal/status/storage.go
@@ -7,7 +7,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s"
)
diff --git a/operator/internal/status/storage_test.go b/operator/internal/status/storage_test.go
index 86ea6641b4739..8924a7808d609 100644
--- a/operator/internal/status/storage_test.go
+++ b/operator/internal/status/storage_test.go
@@ -12,7 +12,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/status"
)
diff --git a/operator/internal/validation/alertingrule.go b/operator/internal/validation/alertingrule.go
index d72f0b1ae8d95..59727737ce914 100644
--- a/operator/internal/validation/alertingrule.go
+++ b/operator/internal/validation/alertingrule.go
@@ -13,7 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
var _ admission.CustomValidator = &AlertingRuleValidator{}
diff --git a/operator/internal/validation/alertingrule_test.go b/operator/internal/validation/alertingrule_test.go
index 6eac5be9a25c0..fa29ecd252054 100644
--- a/operator/internal/validation/alertingrule_test.go
+++ b/operator/internal/validation/alertingrule_test.go
@@ -10,7 +10,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/validation"
)
diff --git a/operator/internal/validation/lokistack.go b/operator/internal/validation/lokistack.go
index e6c84458c0802..c5eacd8d57299 100644
--- a/operator/internal/validation/lokistack.go
+++ b/operator/internal/validation/lokistack.go
@@ -12,7 +12,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// objectStorageSchemaMap defines the type for mapping a schema version with a date
@@ -79,12 +79,10 @@ func (v *LokiStackValidator) validate(ctx context.Context, obj runtime.Object) (
}
if stack.Spec.Limits != nil {
- if stack.Spec.Limits.Global != nil && stack.Spec.Limits.Global.OTLP != nil {
- allErrs = append(allErrs, v.validateGlobalOTLPSpec(stack.Spec.Limits.Global.OTLP)...)
- }
-
- if stack.Spec.Limits.Tenants != nil {
- allErrs = append(allErrs, v.validatePerTenantOTLPSpec(stack.Spec.Limits.Tenants)...)
+ if (stack.Spec.Limits.Global != nil && stack.Spec.Limits.Global.OTLP != nil) ||
+ len(stack.Spec.Limits.Tenants) > 0 {
+ // Only need to validate custom OTLP configuration
+ allErrs = append(allErrs, v.validateOTLPConfiguration(&stack.Spec)...)
}
}
@@ -103,98 +101,86 @@ func (v *LokiStackValidator) validate(ctx context.Context, obj runtime.Object) (
)
}
-func (v *LokiStackValidator) validateGlobalOTLPSpec(s *lokiv1.GlobalOTLPSpec) field.ErrorList {
- basePath := field.NewPath("spec", "limits", "global")
+func (v *LokiStackValidator) validateOTLPConfiguration(spec *lokiv1.LokiStackSpec) field.ErrorList {
+ if spec.Tenants == nil {
+ return nil
+ }
- return v.validateOTLPSpec(basePath, &s.OTLPSpec)
-}
+ if spec.Tenants.Mode == lokiv1.OpenshiftLogging {
+ // This tenancy mode always provides stream labels
+ return nil
+ }
-func (v *LokiStackValidator) validatePerTenantOTLPSpec(tenants map[string]lokiv1.PerTenantLimitsTemplateSpec) field.ErrorList {
- var allErrs field.ErrorList
+ if spec.Tenants.Mode == lokiv1.OpenshiftNetwork {
+ // No validation defined for openshift-network tenancy mode
+ // TODO can we define a validation for this mode?
+ return nil
+ }
- for key, tenant := range tenants {
- basePath := field.NewPath("spec", "limits", "tenants").Key(key)
- allErrs = append(allErrs, v.validateOTLPSpec(basePath, tenant.OTLP)...)
+ if spec.Limits == nil {
+ return nil
}
- return allErrs
-}
+ hasGlobalStreamLabels := false
+ if spec.Limits.Global != nil && spec.Limits.Global.OTLP != nil {
+ hasGlobalStreamLabels = v.hasOTLPStreamLabel(spec.Limits.Global.OTLP)
+ }
-func (v *LokiStackValidator) validateOTLPSpec(parent *field.Path, s *lokiv1.OTLPSpec) field.ErrorList {
- var allErrs field.ErrorList
+ if hasGlobalStreamLabels {
+ // When the global configuration has at least one stream label, then the configuration will be valid
+ return nil
+ }
- if s.ResourceAttributes != nil && s.ResourceAttributes.IgnoreDefaults {
- switch {
- case len(s.ResourceAttributes.Attributes) == 0:
- allErrs = append(allErrs,
- field.Invalid(
- parent.Child("otlp", "resourceAttributes"),
- []lokiv1.OTLPAttributesSpec{},
- lokiv1.ErrOTLPResourceAttributesEmptyNotAllowed.Error(),
- ),
- )
- default:
- var indexLabelActionFound bool
- for _, attr := range s.ResourceAttributes.Attributes {
- if attr.Action == lokiv1.OTLPAttributeActionIndexLabel {
- indexLabelActionFound = true
- break
- }
- }
-
- if !indexLabelActionFound {
- allErrs = append(allErrs,
- field.Invalid(
- parent.Child("otlp", "resourceAttributes"),
- s.ResourceAttributes.Attributes,
- lokiv1.ErrOTLPResourceAttributesIndexLabelActionMissing.Error(),
- ),
- )
- }
-
- for idx, attr := range s.ResourceAttributes.Attributes {
- if len(attr.Attributes) == 0 && attr.Regex == "" {
- allErrs = append(allErrs,
- field.Invalid(
- parent.Child("otlp", "resourceAttributes").Index(idx),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- )
- }
- }
+ if spec.Limits.Tenants == nil {
+ // No tenant config and no global stream labels -> error
+ return field.ErrorList{
+ field.Invalid(
+ field.NewPath("spec", "limits", "global", "otlp", "streamLabels", "resourceAttributes"),
+ nil,
+ lokiv1.ErrOTLPGlobalNoStreamLabel.Error(),
+ ),
}
}
- if len(s.ScopeAttributes) != 0 {
- for idx, attr := range s.ScopeAttributes {
- if len(attr.Attributes) == 0 && attr.Regex == "" {
- allErrs = append(allErrs,
- field.Invalid(
- parent.Child("otlp", "scopeAttributes").Index(idx),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- )
- }
+ errList := field.ErrorList{}
+ for _, tenant := range spec.Tenants.Authentication {
+ tenantName := tenant.TenantName
+ tenantLimits, ok := spec.Limits.Tenants[tenantName]
+ if !ok || tenantLimits.OTLP == nil {
+ // No tenant limits defined and no global stream labels -> error
+ errList = append(errList, field.Invalid(
+ field.NewPath("spec", "limits", "tenants", tenantName, "otlp"),
+ nil,
+ lokiv1.ErrOTLPTenantMissing.Error(),
+ ))
+
+ continue
}
- }
- if len(s.LogAttributes) != 0 {
- for idx, attr := range s.LogAttributes {
- if len(attr.Attributes) == 0 && attr.Regex == "" {
- allErrs = append(allErrs,
- field.Invalid(
- parent.Child("otlp", "logAttributes").Index(idx),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- )
- }
+ if v.hasOTLPStreamLabel(tenantLimits.OTLP) {
+ continue
}
+
+ errList = append(errList, field.Invalid(
+ field.NewPath("spec", "limits", "tenants", tenantName, "otlp", "streamLabels", "resourceAttributes"),
+ nil,
+ lokiv1.ErrOTLPTenantNoStreamLabel.Error(),
+ ))
}
- return allErrs
+ return errList
+}
+
+func (v *LokiStackValidator) hasOTLPStreamLabel(otlp *lokiv1.OTLPSpec) bool {
+ if otlp == nil {
+ return false
+ }
+
+ if otlp.StreamLabels == nil {
+ return false
+ }
+
+ return len(otlp.StreamLabels.ResourceAttributes) > 0
}
func (v *LokiStackValidator) validateHashRingSpec(s lokiv1.LokiStackSpec) field.ErrorList {
diff --git a/operator/internal/validation/lokistack_test.go b/operator/internal/validation/lokistack_test.go
index 791447e013018..18434c5fa20b2 100644
--- a/operator/internal/validation/lokistack_test.go
+++ b/operator/internal/validation/lokistack_test.go
@@ -10,7 +10,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/validation"
)
@@ -391,64 +391,60 @@ var ltt = []struct {
),
},
{
- desc: "enabling global limits OTLP IgnoreDefaults without resource attributes",
+ desc: "lokistack with custom OTLP configuration with a global stream label",
spec: lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
- Storage: lokiv1.ObjectStorageSpec{
- Schemas: []lokiv1.ObjectStorageSchema{
- {
- Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
- },
- },
- },
Limits: &lokiv1.LimitsSpec{
Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "global.stream.label",
+ },
},
},
},
},
},
- },
- },
- err: apierrors.NewInvalid(
- schema.GroupKind{Group: "loki.grafana.com", Kind: "LokiStack"},
- "testing-stack",
- field.ErrorList{
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "resourceAttributes"),
- []lokiv1.OTLPAttributesSpec{},
- lokiv1.ErrOTLPResourceAttributesEmptyNotAllowed.Error(),
- ),
- },
- ),
- },
- {
- desc: "enabling global limits OTLP IgnoreDefaults without index label action for resource attributes",
- spec: lokiv1.LokiStack{
- Spec: lokiv1.LokiStackSpec{
Storage: lokiv1.ObjectStorageSpec{
Schemas: []lokiv1.ObjectStorageSchema{
{
Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
+ EffectiveDate: "2024-10-22",
},
},
},
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ },
+ },
+ },
+ err: nil,
+ },
+ {
+ desc: "lokistack with custom OTLP configuration with a global stream label and a tenant with no stream label",
+ spec: lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
Limits: &lokiv1.LimitsSpec{
Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- Attributes: []lokiv1.OTLPResourceAttributesConfigSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "global.stream.label",
+ },
+ },
+ },
+ },
+ },
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
{
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{"test"},
+ Name: "custom.resource.attribute",
},
},
},
@@ -456,46 +452,38 @@ var ltt = []struct {
},
},
},
- },
- },
- err: apierrors.NewInvalid(
- schema.GroupKind{Group: "loki.grafana.com", Kind: "LokiStack"},
- "testing-stack",
- field.ErrorList{
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "resourceAttributes"),
- []lokiv1.OTLPResourceAttributesConfigSpec{
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
{
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- Attributes: []string{"test"},
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-10-22",
},
},
- lokiv1.ErrOTLPResourceAttributesIndexLabelActionMissing.Error(),
- ),
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test-tenant",
+ },
+ },
+ },
},
- ),
+ },
+ err: nil,
},
{
- desc: "enabling global limits OTLP IgnoreDefaults with invalid resource attributes config",
+ desc: "lokistack with custom OTLP configuration with no global stream label and a tenant with a stream label",
spec: lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
- Storage: lokiv1.ObjectStorageSpec{
- Schemas: []lokiv1.ObjectStorageSchema{
- {
- Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
- },
- },
- },
Limits: &lokiv1.LimitsSpec{
- Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- Attributes: []lokiv1.OTLPResourceAttributesConfigSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
{
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
+ Name: "tenant.stream.label",
},
},
},
@@ -503,99 +491,54 @@ var ltt = []struct {
},
},
},
- },
- },
- err: apierrors.NewInvalid(
- schema.GroupKind{Group: "loki.grafana.com", Kind: "LokiStack"},
- "testing-stack",
- field.ErrorList{
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "resourceAttributes"),
- []lokiv1.OTLPResourceAttributesConfigSpec{
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- },
- },
- lokiv1.ErrOTLPResourceAttributesIndexLabelActionMissing.Error(),
- ),
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "resourceAttributes").Index(0),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- },
- ),
- },
- {
- desc: "enabling global limits OTLP with invalid resource attributes config",
- spec: lokiv1.LokiStack{
- Spec: lokiv1.LokiStackSpec{
Storage: lokiv1.ObjectStorageSpec{
Schemas: []lokiv1.ObjectStorageSchema{
{
Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
+ EffectiveDate: "2024-10-22",
},
},
},
- Limits: &lokiv1.LimitsSpec{
- Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- Attributes: []lokiv1.OTLPResourceAttributesConfigSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- },
- },
- },
- },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test-tenant",
},
},
},
},
},
- err: apierrors.NewInvalid(
- schema.GroupKind{Group: "loki.grafana.com", Kind: "LokiStack"},
- "testing-stack",
- field.ErrorList{
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "resourceAttributes").Index(0),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- },
- ),
+ err: nil,
},
{
- desc: "invalid global OTLP scope attribute specs",
+ desc: "lokistack with custom OTLP configuration missing a global stream label",
spec: lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
- Storage: lokiv1.ObjectStorageSpec{
- Schemas: []lokiv1.ObjectStorageSchema{
- {
- Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
- },
- },
- },
Limits: &lokiv1.LimitsSpec{
Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- ScopeAttributes: []lokiv1.OTLPAttributesSpec{
+ OTLP: &lokiv1.OTLPSpec{
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
{
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
+ Name: "custom.resource.attribute",
},
},
},
},
},
},
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV13,
+ EffectiveDate: "2024-10-22",
+ },
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ },
},
},
err: apierrors.NewInvalid(
@@ -603,43 +546,48 @@ var ltt = []struct {
"testing-stack",
field.ErrorList{
field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "scopeAttributes").Index(0),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "scopeAttributes").Index(1),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
+ field.NewPath("spec", "limits", "global", "otlp", "streamLabels", "resourceAttributes"),
+ nil,
+ lokiv1.ErrOTLPGlobalNoStreamLabel.Error(),
),
},
),
},
{
- desc: "invalid global OTLP log attribute specs",
+ desc: "lokistack with custom OTLP configuration missing a tenant",
spec: lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StreamLabels: &lokiv1.OTLPStreamLabelSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.stream.label",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
Storage: lokiv1.ObjectStorageSpec{
Schemas: []lokiv1.ObjectStorageSchema{
{
Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
+ EffectiveDate: "2024-10-22",
},
},
},
- Limits: &lokiv1.LimitsSpec{
- Global: &lokiv1.LimitsTemplateSpec{
- OTLP: &lokiv1.GlobalOTLPSpec{
- OTLPSpec: lokiv1.OTLPSpec{
- LogAttributes: []lokiv1.OTLPAttributesSpec{
- {
- Action: lokiv1.OTLPAttributeActionIndexLabel,
- },
- {
- Action: lokiv1.OTLPAttributeActionStructuredMetadata,
- },
- },
- },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test-tenant",
+ },
+ {
+ TenantName: "second-tenant",
},
},
},
@@ -650,38 +598,45 @@ var ltt = []struct {
"testing-stack",
field.ErrorList{
field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "logAttributes").Index(0),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
- ),
- field.Invalid(
- field.NewPath("spec", "limits", "global", "otlp", "logAttributes").Index(1),
- []string{},
- lokiv1.ErrOTLPAttributesSpecInvalid.Error(),
+ field.NewPath("spec", "limits", "tenants", "second-tenant", "otlp"),
+ nil,
+ lokiv1.ErrOTLPTenantMissing.Error(),
),
},
),
},
{
- desc: "enabling per-tenant limits OTLP IgnoreDefaults without resource attributes",
+ desc: "lokistack with custom OTLP configuration with a tenant without stream label",
spec: lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
+ "test-tenant": {
+ OTLP: &lokiv1.OTLPSpec{
+ StructuredMetadata: &lokiv1.OTLPMetadataSpec{
+ ResourceAttributes: []lokiv1.OTLPAttributeReference{
+ {
+ Name: "tenant.resource.attribute",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
Storage: lokiv1.ObjectStorageSpec{
Schemas: []lokiv1.ObjectStorageSchema{
{
Version: lokiv1.ObjectStorageSchemaV13,
- EffectiveDate: "2020-10-11",
+ EffectiveDate: "2024-10-22",
},
},
},
- Limits: &lokiv1.LimitsSpec{
- Tenants: map[string]lokiv1.PerTenantLimitsTemplateSpec{
- "tenant-a": {
- OTLP: &lokiv1.OTLPSpec{
- ResourceAttributes: &lokiv1.OTLPResourceAttributesSpec{
- IgnoreDefaults: true,
- },
- },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test-tenant",
},
},
},
@@ -692,9 +647,9 @@ var ltt = []struct {
"testing-stack",
field.ErrorList{
field.Invalid(
- field.NewPath("spec", "limits", "tenants").Key("tenant-a").Child("otlp", "resourceAttributes"),
- []lokiv1.OTLPAttributesSpec{},
- lokiv1.ErrOTLPResourceAttributesEmptyNotAllowed.Error(),
+ field.NewPath("spec", "limits", "tenants", "test-tenant", "otlp", "streamLabels", "resourceAttributes"),
+ nil,
+ lokiv1.ErrOTLPTenantNoStreamLabel.Error(),
),
},
),
diff --git a/operator/internal/validation/openshift/alertingrule.go b/operator/internal/validation/openshift/alertingrule.go
index cb22f1785895e..e9dc191597e0b 100644
--- a/operator/internal/validation/openshift/alertingrule.go
+++ b/operator/internal/validation/openshift/alertingrule.go
@@ -7,7 +7,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/strings/slices"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// AlertingRuleValidator does extended-validation of AlertingRule resources for Openshift-based deployments.
diff --git a/operator/internal/validation/openshift/alertingrule_test.go b/operator/internal/validation/openshift/alertingrule_test.go
index c613de51df0fa..1644f7e10bd3e 100644
--- a/operator/internal/validation/openshift/alertingrule_test.go
+++ b/operator/internal/validation/openshift/alertingrule_test.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestAlertingRuleValidator(t *testing.T) {
diff --git a/operator/internal/validation/openshift/common.go b/operator/internal/validation/openshift/common.go
index a2b28d5f5f0aa..a41161cbc54f6 100644
--- a/operator/internal/validation/openshift/common.go
+++ b/operator/internal/validation/openshift/common.go
@@ -9,7 +9,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
const (
diff --git a/operator/internal/validation/openshift/recordingrule.go b/operator/internal/validation/openshift/recordingrule.go
index 6e0d9106c8c35..bb6851058fa6d 100644
--- a/operator/internal/validation/openshift/recordingrule.go
+++ b/operator/internal/validation/openshift/recordingrule.go
@@ -7,7 +7,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/strings/slices"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
// RecordingRuleValidator does extended-validation of RecordingRule resources for Openshift-based deployments.
diff --git a/operator/internal/validation/openshift/recordingrule_test.go b/operator/internal/validation/openshift/recordingrule_test.go
index 99540f2937c42..e51ab748c82cf 100644
--- a/operator/internal/validation/openshift/recordingrule_test.go
+++ b/operator/internal/validation/openshift/recordingrule_test.go
@@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
func TestRecordingRuleValidator(t *testing.T) {
diff --git a/operator/internal/validation/recordingrule.go b/operator/internal/validation/recordingrule.go
index a84011efd2de8..a5b2a803af23e 100644
--- a/operator/internal/validation/recordingrule.go
+++ b/operator/internal/validation/recordingrule.go
@@ -13,7 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
var _ admission.CustomValidator = &RecordingRuleValidator{}
diff --git a/operator/internal/validation/recordingrule_test.go b/operator/internal/validation/recordingrule_test.go
index 6b47ef0d6d8d7..fd79def757ed6 100644
--- a/operator/internal/validation/recordingrule_test.go
+++ b/operator/internal/validation/recordingrule_test.go
@@ -10,7 +10,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/validation"
)
diff --git a/operator/internal/validation/rulerconfig.go b/operator/internal/validation/rulerconfig.go
index 9e4ab0b0b8297..3a9c51282887c 100644
--- a/operator/internal/validation/rulerconfig.go
+++ b/operator/internal/validation/rulerconfig.go
@@ -11,7 +11,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
)
var _ admission.CustomValidator = &RulerConfigValidator{}
diff --git a/operator/internal/validation/rulerconfig_test.go b/operator/internal/validation/rulerconfig_test.go
index 4ed8757cabdbd..32097460dae8e 100644
--- a/operator/internal/validation/rulerconfig_test.go
+++ b/operator/internal/validation/rulerconfig_test.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/utils/ptr"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ lokiv1 "github.com/grafana/loki/operator/api/loki/v1"
"github.com/grafana/loki/operator/internal/validation"
)
diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet
index b557030a2f70e..3e8a0172dda3b 100644
--- a/operator/jsonnet/config.libsonnet
+++ b/operator/jsonnet/config.libsonnet
@@ -188,7 +188,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
'loki-reads.json'+: {
// We drop both BigTable and BlotDB dashboards as they have been
// replaced by the Index dashboards
- local dropList = ['BigTable', 'Ingester - Zone Aware', 'BoltDB Shipper', 'Bloom Gateway'],
+ local dropList = ['BigTable', 'Ingester - Zone Aware', 'BoltDB Index', 'Bloom Gateway'],
uid: '62q5jjYwhVSaz4Mcrm8tV3My3gcKED',
@@ -233,7 +233,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
},
},
'loki-writes.json'+: {
- local dropList = ['Ingester - Zone Aware', 'BoltDB Shipper'],
+ local dropList = ['Ingester - Zone Aware', 'BoltDB Index'],
uid: 'F6nRYKuXmFVpVSFQmXr7cgXy5j7UNr',
title: 'OpenShift Logging / LokiStack / Writes',
tags: defaultLokiTags(super.tags),
diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json
index a2ac102b07587..6456bf5997c11 100644
--- a/operator/jsonnet/jsonnetfile.json
+++ b/operator/jsonnet/jsonnetfile.json
@@ -8,7 +8,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "v3.2.0"
+ "version": "v3.2.1"
}
],
"legacyImports": true
diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json
index 40bf4f23a2544..2ba0c39bea391 100644
--- a/operator/jsonnet/jsonnetfile.lock.json
+++ b/operator/jsonnet/jsonnetfile.lock.json
@@ -38,7 +38,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "659f5421dde1dc4b27d6a0afd1a568673f50dfcc",
+ "version": "3c386cc5d13629a74cddb43c429ec290ba2e4a0a",
"sum": "mVKuwcuL1Wm+JMgt2MiwOAtKkmuClzE75+liMe0AGek="
},
{
diff --git a/operator/netlify.toml b/operator/netlify.toml
index 16393ca5b7dd7..aa313b64d45c0 100644
--- a/operator/netlify.toml
+++ b/operator/netlify.toml
@@ -4,9 +4,9 @@
[build.environment]
# HUGO_VERSION = "..." is set by bingo which allows reproducible local environment.
- NODE_VERSION = "15.5.1"
- NPM_VERSION = "7.3.0"
- GO_VERSION = "1.20.1"
+ NODE_VERSION = "22.10.0"
+ NPM_VERSION = "10.9.0"
+ GO_VERSION = "1.22.8"
[context.production]
command = "(env && make web) || (sleep 30; false)"
diff --git a/pkg/bloombuild/builder/batch.go b/pkg/bloombuild/builder/batch.go
index d86111d2924a7..cb39a6df831f8 100644
--- a/pkg/bloombuild/builder/batch.go
+++ b/pkg/bloombuild/builder/batch.go
@@ -248,7 +248,7 @@ func (i *blockLoadingIter) loadNext() bool {
blockRefs := i.overlapping.At()
loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
- filtered := iter.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
+ filtered := iter.NewFilterIter(loader, i.filter)
iters := make([]iter.PeekIterator[*v1.SeriesWithBlooms], 0, len(blockRefs))
for filtered.Next() {
@@ -279,7 +279,7 @@ func (i *blockLoadingIter) loadNext() bool {
// two overlapping blocks can conceivably have the same series, so we need to dedupe,
// preferring the one with the most chunks already indexed since we'll have
// to add fewer chunks to the bloom
- i.iter = iter.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms](
+ i.iter = iter.NewDedupingIter(
func(a, b *v1.SeriesWithBlooms) bool {
return a.Series.Fingerprint == b.Series.Fingerprint
},
@@ -346,7 +346,7 @@ func overlappingBlocksIter(inputs []bloomshipper.BlockRef) iter.Iterator[[]bloom
// can we assume sorted blocks?
peekIter := iter.NewPeekIter(iter.NewSliceIter(inputs))
- return iter.NewDedupingIter[bloomshipper.BlockRef, []bloomshipper.BlockRef](
+ return iter.NewDedupingIter(
func(a bloomshipper.BlockRef, b []bloomshipper.BlockRef) bool {
minFp := b[0].Bounds.Min
maxFp := slices.MaxFunc(b, func(a, b bloomshipper.BlockRef) int { return int(a.Bounds.Max - b.Bounds.Max) }).Bounds.Max
diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go
index 608ab0807c9ec..cedba1480e2f6 100644
--- a/pkg/bloombuild/builder/batch_test.go
+++ b/pkg/bloombuild/builder/batch_test.go
@@ -120,7 +120,7 @@ func TestBatchedLoader(t *testing.T) {
)
}
- loader := newBatchedLoader[int, int, int](
+ loader := newBatchedLoader(
tc.ctx,
fetchers,
tc.inputs,
@@ -128,7 +128,7 @@ func TestBatchedLoader(t *testing.T) {
tc.batchSize,
)
- got, err := v2.Collect[int](loader)
+ got, err := v2.Collect(loader)
if tc.err {
require.Error(t, err)
return
diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go
index 63950d7eadcbd..7f0ac4b65b4ce 100644
--- a/pkg/bloombuild/builder/builder.go
+++ b/pkg/bloombuild/builder/builder.go
@@ -46,7 +46,6 @@ type Builder struct {
metrics *Metrics
logger log.Logger
- tsdbStore common.TSDBStore
bloomStore bloomshipper.Store
chunkLoader ChunkLoader
@@ -60,9 +59,9 @@ type Builder struct {
func New(
cfg Config,
limits Limits,
- schemaCfg config.SchemaConfig,
- storeCfg storage.Config,
- storageMetrics storage.ClientMetrics,
+ _ config.SchemaConfig,
+ _ storage.Config,
+ _ storage.ClientMetrics,
fetcherProvider stores.ChunkFetcherProvider,
bloomStore bloomshipper.Store,
logger log.Logger,
@@ -74,18 +73,12 @@ func New(
builderID := uuid.NewString()
logger = log.With(logger, "builder_id", builderID)
- tsdbStore, err := common.NewTSDBStores(schemaCfg, storeCfg, storageMetrics, logger)
- if err != nil {
- return nil, fmt.Errorf("error creating TSDB store: %w", err)
- }
-
metrics := NewMetrics(r)
b := &Builder{
ID: builderID,
cfg: cfg,
limits: limits,
metrics: metrics,
- tsdbStore: tsdbStore,
bloomStore: bloomStore,
chunkLoader: NewStoreChunkLoader(fetcherProvider, metrics),
logger: logger,
@@ -386,7 +379,7 @@ func (b *Builder) processTask(
// Blocks are built consuming the series iterator. For observability, we wrap the series iterator
// with a counter iterator to count the number of times Next() is called on it.
// This is used to observe the number of series that are being processed.
- seriesItrWithCounter := iter.NewCounterIter[*v1.Series](seriesItr)
+ seriesItrWithCounter := iter.NewCounterIter(seriesItr)
gen := NewSimpleBloomGenerator(
tenant,
@@ -416,7 +409,7 @@ func (b *Builder) processTask(
return nil, fmt.Errorf("failed to build block: %w", err)
}
- logger := log.With(logger, "block", built.BlockRef.String())
+ logger := log.With(logger, "block", built.String())
if err := client.PutBlock(
ctx,
@@ -461,7 +454,7 @@ func (b *Builder) processTask(
}
meta.MetaRef = ref
- logger = log.With(logger, "meta", meta.MetaRef.String())
+ logger = log.With(logger, "meta", meta.String())
if err := client.PutMeta(ctx, meta); err != nil {
level.Error(logger).Log("msg", "failed to write meta", "err", err)
@@ -490,7 +483,7 @@ func (b *Builder) loadWorkForGap(
table config.DayTable,
gap protos.Gap,
) (iter.Iterator[*v1.Series], iter.CloseResetIterator[*v1.SeriesWithBlooms], error) {
- seriesItr := iter.NewCancelableIter[*v1.Series](ctx, iter.NewSliceIter[*v1.Series](gap.Series))
+ seriesItr := iter.NewCancelableIter(ctx, iter.NewSliceIter(gap.Series))
// load a blocks iterator for the gap
fetcher, err := b.bloomStore.Fetcher(table.ModelTime())
diff --git a/pkg/bloombuild/builder/spec.go b/pkg/bloombuild/builder/spec.go
index 180c2fc32cb00..f7c147fb0a2f8 100644
--- a/pkg/bloombuild/builder/spec.go
+++ b/pkg/bloombuild/builder/spec.go
@@ -3,7 +3,6 @@ package builder
import (
"context"
"fmt"
- "io"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -17,29 +16,8 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/v3/pkg/storage/stores"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
- "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
)
-// inclusive range
-type Keyspace struct {
- min, max model.Fingerprint
-}
-
-func (k Keyspace) Cmp(other Keyspace) v1.BoundsCheck {
- if other.max < k.min {
- return v1.Before
- } else if other.min > k.max {
- return v1.After
- }
- return v1.Overlap
-}
-
-// Store is likely bound within. This allows specifying impls like ShardedStore
-// to only request the shard-range needed from the existing store.
-type BloomGenerator interface {
- Generate(ctx context.Context) (skippedBlocks []v1.BlockMetadata, toClose []io.Closer, results iter.Iterator[*v1.Block], err error)
-}
-
// Simple implementation of a BloomGenerator.
type SimpleBloomGenerator struct {
userID string
@@ -159,7 +137,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIt
)
}
- return NewLazyBlockBuilderIterator(ctx, s.opts, s.metrics, s.populator(ctx), s.writerReaderFunc, series, s.blocksIter)
+ return NewLazyBlockBuilderIterator(ctx, s.opts, s.metrics, s.logger, s.populator(ctx), s.writerReaderFunc, series, s.blocksIter)
}
// LazyBlockBuilderIterator is a lazy iterator over blocks that builds
@@ -168,6 +146,7 @@ type LazyBlockBuilderIterator struct {
ctx context.Context
opts v1.BlockOptions
metrics *v1.Metrics
+ logger log.Logger
populate v1.BloomPopulatorFunc
writerReaderFunc func() (v1.BlockWriter, v1.BlockReader)
series iter.PeekIterator[*v1.Series]
@@ -182,6 +161,7 @@ func NewLazyBlockBuilderIterator(
ctx context.Context,
opts v1.BlockOptions,
metrics *v1.Metrics,
+ logger log.Logger,
populate v1.BloomPopulatorFunc,
writerReaderFunc func() (v1.BlockWriter, v1.BlockReader),
series iter.PeekIterator[*v1.Series],
@@ -191,6 +171,7 @@ func NewLazyBlockBuilderIterator(
ctx: ctx,
opts: opts,
metrics: metrics,
+ logger: logger,
populate: populate,
writerReaderFunc: writerReaderFunc,
series: series,
@@ -218,7 +199,7 @@ func (b *LazyBlockBuilderIterator) Next() bool {
return false
}
- mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics)
+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics, b.logger)
writer, reader := b.writerReaderFunc()
blockBuilder, err := v1.NewBlockBuilder(b.opts, writer)
if err != nil {
@@ -247,12 +228,6 @@ func (b *LazyBlockBuilderIterator) Err() error {
return b.err
}
-// IndexLoader loads an index. This helps us do things like
-// load TSDBs for a specific period excluding multitenant (pre-compacted) indices
-type indexLoader interface {
- Index() (tsdb.Index, error)
-}
-
// ChunkItersByFingerprint models the chunks belonging to a fingerprint
type ChunkItersByFingerprint struct {
fp model.Fingerprint
diff --git a/pkg/bloombuild/builder/spec_test.go b/pkg/bloombuild/builder/spec_test.go
index 23afa58754743..be14c91db36a1 100644
--- a/pkg/bloombuild/builder/spec_test.go
+++ b/pkg/bloombuild/builder/spec_test.go
@@ -48,7 +48,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock
- itr := v2.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx])
+ itr := v2.NewSliceIter(data[minIdx:maxIdx])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
@@ -134,8 +134,8 @@ func TestSimpleBloomGenerator(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) {
sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff)
- storeItr := v2.NewMapIter[v1.SeriesWithBlooms, *v1.Series](
- v2.NewSliceIter[v1.SeriesWithBlooms](data),
+ storeItr := v2.NewMapIter(
+ v2.NewSliceIter(data),
func(swb v1.SeriesWithBlooms) *v1.Series {
return &swb.Series.Series
},
diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go
index e45ff4b153c7c..a58b7cd6130f9 100644
--- a/pkg/bloombuild/common/tsdb.go
+++ b/pkg/bloombuild/common/tsdb.go
@@ -163,7 +163,7 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b
case <-ctx.Done():
return iter.NewEmptyIter[*v1.Series](), ctx.Err()
default:
- return iter.NewCancelableIter[*v1.Series](ctx, iter.NewSliceIter[*v1.Series](series)), nil
+ return iter.NewCancelableIter(ctx, iter.NewSliceIter(series)), nil
}
}
@@ -173,6 +173,7 @@ type TSDBStores struct {
}
func NewTSDBStores(
+ component string,
schemaCfg config.SchemaConfig,
storeCfg baseStore.Config,
clientMetrics baseStore.ClientMetrics,
@@ -185,8 +186,7 @@ func NewTSDBStores(
for i, cfg := range schemaCfg.Configs {
if cfg.IndexType == types.TSDBType {
-
- c, err := baseStore.NewObjectClient(cfg.ObjectType, storeCfg, clientMetrics)
+ c, err := baseStore.NewObjectClient(cfg.ObjectType, component, storeCfg, clientMetrics)
if err != nil {
return nil, errors.Wrap(err, "failed to create object client")
}
diff --git a/pkg/bloombuild/common/tsdb_test.go b/pkg/bloombuild/common/tsdb_test.go
index 70ee440551e42..b2df7982f4382 100644
--- a/pkg/bloombuild/common/tsdb_test.go
+++ b/pkg/bloombuild/common/tsdb_test.go
@@ -66,7 +66,7 @@ func TestTSDBSeriesIter(t *testing.T) {
itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64))
require.NoError(t, err)
- v1.EqualIterators[*v1.Series](
+ v1.EqualIterators(
t,
func(a, b *v1.Series) {
require.Equal(t, a, b)
diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go
index 40ec5707ef715..cfbccd84322d1 100644
--- a/pkg/bloombuild/planner/config.go
+++ b/pkg/bloombuild/planner/config.go
@@ -4,6 +4,8 @@ import (
"flag"
"fmt"
"time"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies"
)
// Config configures the bloom-planner component.
@@ -44,8 +46,8 @@ func (cfg *Config) Validate() error {
type Limits interface {
RetentionLimits
+ strategies.Limits
BloomCreationEnabled(tenantID string) bool
- BloomSplitSeriesKeyspaceBy(tenantID string) int
BloomBuildMaxBuilders(tenantID string) int
BuilderResponseTimeout(tenantID string) time.Duration
BloomTaskMaxRetries(tenantID string) int
diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go
index 3523135780e55..936515ad736f3 100644
--- a/pkg/bloombuild/planner/metrics.go
+++ b/pkg/bloombuild/planner/metrics.go
@@ -5,8 +5,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
-
- "github.com/grafana/loki/v3/pkg/queue"
)
const (
@@ -211,7 +209,3 @@ func NewMetrics(
}),
}
}
-
-func NewQueueMetrics(r prometheus.Registerer) *queue.Metrics {
- return queue.NewMetrics(r, metricsNamespace, metricsSubsystem)
-}
diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go
index f66748f1832b8..33f0bf64c833b 100644
--- a/pkg/bloombuild/planner/planner.go
+++ b/pkg/bloombuild/planner/planner.go
@@ -17,6 +17,7 @@ import (
"go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/bloombuild/common"
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/queue"
@@ -75,13 +76,13 @@ func New(
) (*Planner, error) {
utillog.WarnExperimentalUse("Bloom Planner", logger)
- tsdbStore, err := common.NewTSDBStores(schemaCfg, storeCfg, storageMetrics, logger)
+ tsdbStore, err := common.NewTSDBStores("bloom-planner", schemaCfg, storeCfg, storageMetrics, logger)
if err != nil {
return nil, fmt.Errorf("error creating TSDB store: %w", err)
}
// Queue to manage tasks
- queueMetrics := NewQueueMetrics(r)
+ queueMetrics := queue.NewMetrics(r, metricsNamespace, metricsSubsystem)
tasksQueue := queue.NewRequestQueue(cfg.MaxQueuedTasksPerTenant, 0, NewQueueLimits(limits), queueMetrics)
// Clean metrics for inactive users: do not have added tasks to the queue in the last 1 hour
@@ -254,7 +255,7 @@ func (p *Planner) runOne(ctx context.Context) error {
tables := p.tables(time.Now())
level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays())
- work, err := p.loadTenantWork(ctx, tables)
+ tenantTables, err := p.loadTenantTables(ctx, tables)
if err != nil {
return fmt.Errorf("error loading work: %w", err)
}
@@ -265,20 +266,20 @@ func (p *Planner) runOne(ctx context.Context) error {
tasksResultForTenantTable := make(map[tenantTable]tenantTableTaskResults)
var totalTasks int
- for table, tenants := range work {
- for tenant, ownershipRanges := range tenants {
+ for table, tenants := range tenantTables {
+ for _, tenant := range tenants {
logger := log.With(p.logger, "tenant", tenant, "table", table.Addr())
+
tt := tenantTable{
tenant: tenant,
table: table,
}
- tasks, existingMetas, err := p.computeTasks(ctx, table, tenant, ownershipRanges)
+ tasks, existingMetas, err := p.computeTasks(ctx, table, tenant)
if err != nil {
- level.Error(logger).Log("msg", "error computing tasks", "err", err)
+ level.Error(logger).Log("msg", "failed to compute tasks", "err", err)
continue
}
- level.Debug(logger).Log("msg", "computed tasks", "tasks", len(tasks), "existingMetas", len(existingMetas))
var tenantTableEnqueuedTasks int
resultsCh := make(chan *protos.TaskResult, len(tasks))
@@ -367,16 +368,19 @@ func (p *Planner) runOne(ctx context.Context) error {
return nil
}
-// computeTasks computes the tasks for a given table and tenant and ownership range.
-// It returns the tasks to be executed and the metas that are existing relevant for the ownership range.
+// computeTasks computes the tasks for a given table and tenant.
+// It returns the tasks to be executed and the existing metas.
func (p *Planner) computeTasks(
ctx context.Context,
table config.DayTable,
tenant string,
- ownershipRanges []v1.FingerprintBounds,
) ([]*protos.Task, []bloomshipper.Meta, error) {
- var tasks []*protos.Task
- logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant)
+ strategy, err := strategies.NewStrategy(tenant, p.limits, p.logger)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating strategy: %w", err)
+ }
+
+ logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant, "strategy", strategy.Name())
// Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms
metas, err := p.bloomStore.FetchMetas(
@@ -421,24 +425,12 @@ func (p *Planner) computeTasks(
}
}()
- for _, ownershipRange := range ownershipRanges {
- logger := log.With(logger, "ownership", ownershipRange.String())
-
- // Filter only the metas that overlap in the ownership range
- metasInBounds := bloomshipper.FilterMetasOverlappingBounds(metas, ownershipRange)
-
- // Find gaps in the TSDBs for this tenant/table
- gaps, err := p.findOutdatedGaps(ctx, tenant, openTSDBs, ownershipRange, metasInBounds, logger)
- if err != nil {
- level.Error(logger).Log("msg", "failed to find outdated gaps", "err", err)
- continue
- }
-
- for _, gap := range gaps {
- tasks = append(tasks, protos.NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps))
- }
+ tasks, err := strategy.Plan(ctx, table, tenant, openTSDBs, metas)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to plan tasks: %w", err)
}
+ level.Debug(logger).Log("msg", "computed tasks", "tasks", len(tasks), "existingMetas", len(metas))
return tasks, metas, nil
}
@@ -598,14 +590,14 @@ func (p *Planner) deleteOutdatedMetasAndBlocks(
err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef})
if err != nil {
if client.IsObjectNotFoundErr(err) {
- level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef.String())
+ level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.String())
} else {
- level.Error(logger).Log("msg", "failed to delete meta", "err", err, "meta", meta.MetaRef.String())
+ level.Error(logger).Log("msg", "failed to delete meta", "err", err, "meta", meta.String())
return nil, errors.Wrap(err, "failed to delete meta")
}
}
deletedMetas++
- level.Debug(logger).Log("msg", "removed outdated meta", "meta", meta.MetaRef.String())
+ level.Debug(logger).Log("msg", "removed outdated meta", "meta", meta.String())
}
level.Debug(logger).Log(
@@ -649,15 +641,12 @@ func (p *Planner) tables(ts time.Time) *dayRangeIterator {
return newDayRangeIterator(fromDay, throughDay, p.schemaCfg)
}
-type work map[config.DayTable]map[string][]v1.FingerprintBounds
-
-// loadTenantWork loads the work for each tenant and table tuple.
-// work is the list of fingerprint ranges that need to be indexed in bloom filters.
-func (p *Planner) loadTenantWork(
+// loadTenantTables loads all tenants with bloom build enabled for each table.
+func (p *Planner) loadTenantTables(
ctx context.Context,
tables *dayRangeIterator,
-) (work, error) {
- tenantTableWork := make(map[config.DayTable]map[string][]v1.FingerprintBounds, tables.TotalDays())
+) (map[config.DayTable][]string, error) {
+ tenantTables := make(map[config.DayTable][]string, tables.TotalDays())
for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
table := tables.At()
@@ -670,8 +659,8 @@ func (p *Planner) loadTenantWork(
level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Remaining())
// If this is the first this we see this table, initialize the map
- if tenantTableWork[table] == nil {
- tenantTableWork[table] = make(map[string][]v1.FingerprintBounds, tenants.Remaining())
+ if tenantTables[table] == nil {
+ tenantTables[table] = make([]string, 0, tenants.Remaining())
}
for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
@@ -683,11 +672,6 @@ func (p *Planner) loadTenantWork(
continue
}
- splitFactor := p.limits.BloomSplitSeriesKeyspaceBy(tenant)
- bounds := SplitFingerprintKeyspaceByFactor(splitFactor)
-
- tenantTableWork[table][tenant] = bounds
-
// Reset progress tracking metrics for this tenant
// NOTE(salvacorts): We will reset them multiple times for the same tenant, for each table, but it's not a big deal.
// Alternatively, we can use a Counter instead of a Gauge, but I think a Gauge is easier to reason about.
@@ -695,7 +679,7 @@ func (p *Planner) loadTenantWork(
p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusSuccess).Set(0)
p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusFailure).Set(0)
- level.Debug(p.logger).Log("msg", "loading work for tenant", "table", table, "tenant", tenant, "splitFactor", splitFactor)
+ tenantTables[table] = append(tenantTables[table], tenant)
}
if err := tenants.Err(); err != nil {
level.Error(p.logger).Log("msg", "error iterating tenants", "err", err)
@@ -708,7 +692,7 @@ func (p *Planner) loadTenantWork(
return nil, fmt.Errorf("error iterating tables: %w", err)
}
- return tenantTableWork, ctx.Err()
+ return tenantTables, ctx.Err()
}
func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*iter.SliceIter[string], error) {
@@ -720,178 +704,6 @@ func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*iter.Sli
return iter.NewSliceIter(tenants), nil
}
-// blockPlan is a plan for all the work needed to build a meta.json
-// It includes:
-// - the tsdb (source of truth) which contains all the series+chunks
-// we need to ensure are indexed in bloom blocks
-// - a list of gaps that are out of date and need to be checked+built
-// - within each gap, a list of block refs which overlap the gap are included
-// so we can use them to accelerate bloom generation. They likely contain many
-// of the same chunks we need to ensure are indexed, just from previous tsdb iterations.
-// This is a performance optimization to avoid expensive re-reindexing
-type blockPlan struct {
- tsdb tsdb.SingleTenantTSDBIdentifier
- gaps []protos.Gap
-}
-
-func (p *Planner) findOutdatedGaps(
- ctx context.Context,
- tenant string,
- tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries,
- ownershipRange v1.FingerprintBounds,
- metas []bloomshipper.Meta,
- logger log.Logger,
-) ([]blockPlan, error) {
- // Determine which TSDBs have gaps in the ownership range and need to
- // be processed.
- tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, tsdbs, metas)
- if err != nil {
- level.Error(logger).Log("msg", "failed to find gaps", "err", err)
- return nil, fmt.Errorf("failed to find gaps: %w", err)
- }
-
- if len(tsdbsWithGaps) == 0 {
- level.Debug(logger).Log("msg", "blooms exist for all tsdbs")
- return nil, nil
- }
-
- work, err := blockPlansForGaps(ctx, tenant, tsdbsWithGaps, metas)
- if err != nil {
- level.Error(logger).Log("msg", "failed to create plan", "err", err)
- return nil, fmt.Errorf("failed to create plan: %w", err)
- }
-
- return work, nil
-}
-
-// Used to signal the gaps that need to be populated for a tsdb
-type tsdbGaps struct {
- tsdbIdentifier tsdb.SingleTenantTSDBIdentifier
- tsdb common.ClosableForSeries
- gaps []v1.FingerprintBounds
-}
-
-// gapsBetweenTSDBsAndMetas returns if the metas are up-to-date with the TSDBs. This is determined by asserting
-// that for each TSDB, there are metas covering the entire ownership range which were generated from that specific TSDB.
-func gapsBetweenTSDBsAndMetas(
- ownershipRange v1.FingerprintBounds,
- tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries,
- metas []bloomshipper.Meta,
-) (res []tsdbGaps, err error) {
- for db, tsdb := range tsdbs {
- id := db.Name()
-
- relevantMetas := make([]v1.FingerprintBounds, 0, len(metas))
- for _, meta := range metas {
- for _, s := range meta.Sources {
- if s.Name() == id {
- relevantMetas = append(relevantMetas, meta.Bounds)
- }
- }
- }
-
- gaps, err := FindGapsInFingerprintBounds(ownershipRange, relevantMetas)
- if err != nil {
- return nil, err
- }
-
- if len(gaps) > 0 {
- res = append(res, tsdbGaps{
- tsdbIdentifier: db,
- tsdb: tsdb,
- gaps: gaps,
- })
- }
- }
-
- return res, err
-}
-
-// blockPlansForGaps groups tsdb gaps we wish to fill with overlapping but out of date blocks.
-// This allows us to expedite bloom generation by using existing blocks to fill in the gaps
-// since many will contain the same chunks.
-func blockPlansForGaps(
- ctx context.Context,
- tenant string,
- tsdbs []tsdbGaps,
- metas []bloomshipper.Meta,
-) ([]blockPlan, error) {
- plans := make([]blockPlan, 0, len(tsdbs))
-
- for _, idx := range tsdbs {
- plan := blockPlan{
- tsdb: idx.tsdbIdentifier,
- gaps: make([]protos.Gap, 0, len(idx.gaps)),
- }
-
- for _, gap := range idx.gaps {
- planGap := protos.Gap{
- Bounds: gap,
- }
-
- seriesItr, err := common.NewTSDBSeriesIter(ctx, tenant, idx.tsdb, gap)
- if err != nil {
- return nil, fmt.Errorf("failed to load series from TSDB for gap (%s): %w", gap.String(), err)
- }
- planGap.Series, err = iter.Collect(seriesItr)
- if err != nil {
- return nil, fmt.Errorf("failed to collect series: %w", err)
- }
-
- for _, meta := range metas {
- if meta.Bounds.Intersection(gap) == nil {
- // this meta doesn't overlap the gap, skip
- continue
- }
-
- for _, block := range meta.Blocks {
- if block.Bounds.Intersection(gap) == nil {
- // this block doesn't overlap the gap, skip
- continue
- }
- // this block overlaps the gap, add it to the plan
- // for this gap
- planGap.Blocks = append(planGap.Blocks, block)
- }
- }
-
- // ensure we sort blocks so deduping iterator works as expected
- sort.Slice(planGap.Blocks, func(i, j int) bool {
- return planGap.Blocks[i].Bounds.Less(planGap.Blocks[j].Bounds)
- })
-
- peekingBlocks := iter.NewPeekIter[bloomshipper.BlockRef](
- iter.NewSliceIter[bloomshipper.BlockRef](
- planGap.Blocks,
- ),
- )
- // dedupe blocks which could be in multiple metas
- itr := iter.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef](
- func(a, b bloomshipper.BlockRef) bool {
- return a == b
- },
- iter.Identity[bloomshipper.BlockRef],
- func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef {
- return a
- },
- peekingBlocks,
- )
-
- deduped, err := iter.Collect[bloomshipper.BlockRef](itr)
- if err != nil {
- return nil, fmt.Errorf("failed to dedupe blocks: %w", err)
- }
- planGap.Blocks = deduped
-
- plan.gaps = append(plan.gaps, planGap)
- }
-
- plans = append(plans, plan)
- }
-
- return plans, nil
-}
-
func (p *Planner) addPendingTask(task *QueueTask) {
p.pendingTasks.Store(task.ID, task)
}
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
index 9523a45795579..6b1b1e0beba16 100644
--- a/pkg/bloombuild/planner/planner_test.go
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -1,10 +1,8 @@
package planner
import (
- "bytes"
"context"
"fmt"
- "io"
"math"
"sync"
"testing"
@@ -15,16 +13,13 @@ import (
"github.com/grafana/dskit/services"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"google.golang.org/grpc"
- "github.com/grafana/loki/v3/pkg/bloombuild/common"
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest"
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
- "github.com/grafana/loki/v3/pkg/compression"
- iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -32,461 +27,10 @@ import (
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
- "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
- "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
"github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/util/mempool"
)
-var testDay = parseDayTime("2023-09-01")
-var testTable = config.NewDayTable(testDay, "index_")
-
-func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier {
- return tsdb.SingleTenantTSDBIdentifier{
- TS: time.Unix(int64(n), 0),
- }
-}
-
-func genMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta {
- m := bloomshipper.Meta{
- MetaRef: bloomshipper.MetaRef{
- Ref: bloomshipper.Ref{
- TenantID: "fakeTenant",
- TableName: testTable.Addr(),
- Bounds: v1.NewBounds(min, max),
- },
- },
- Blocks: blocks,
- }
- for _, source := range sources {
- m.Sources = append(m.Sources, tsdbID(source))
- }
- return m
-}
-
-func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
-
- for _, tc := range []struct {
- desc string
- err bool
- exp []tsdbGaps
- ownershipRange v1.FingerprintBounds
- tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries
- metas []bloomshipper.Meta
- }{
- {
- desc: "non-overlapping tsdbs and metas",
- err: true,
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
- tsdbID(0): nil,
- },
- metas: []bloomshipper.Meta{
- genMeta(11, 20, []int{0}, nil),
- },
- },
- {
- desc: "single tsdb",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
- tsdbID(0): nil,
- },
- metas: []bloomshipper.Meta{
- genMeta(4, 8, []int{0}, nil),
- },
- exp: []tsdbGaps{
- {
- tsdbIdentifier: tsdbID(0),
- gaps: []v1.FingerprintBounds{
- v1.NewBounds(0, 3),
- v1.NewBounds(9, 10),
- },
- },
- },
- },
- {
- desc: "multiple tsdbs with separate blocks",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
- tsdbID(0): nil,
- tsdbID(1): nil,
- },
- metas: []bloomshipper.Meta{
- genMeta(0, 5, []int{0}, nil),
- genMeta(6, 10, []int{1}, nil),
- },
- exp: []tsdbGaps{
- {
- tsdbIdentifier: tsdbID(0),
- gaps: []v1.FingerprintBounds{
- v1.NewBounds(6, 10),
- },
- },
- {
- tsdbIdentifier: tsdbID(1),
- gaps: []v1.FingerprintBounds{
- v1.NewBounds(0, 5),
- },
- },
- },
- },
- {
- desc: "multiple tsdbs with the same blocks",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
- tsdbID(0): nil,
- tsdbID(1): nil,
- },
- metas: []bloomshipper.Meta{
- genMeta(0, 5, []int{0, 1}, nil),
- genMeta(6, 8, []int{1}, nil),
- },
- exp: []tsdbGaps{
- {
- tsdbIdentifier: tsdbID(0),
- gaps: []v1.FingerprintBounds{
- v1.NewBounds(6, 10),
- },
- },
- {
- tsdbIdentifier: tsdbID(1),
- gaps: []v1.FingerprintBounds{
- v1.NewBounds(9, 10),
- },
- },
- },
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas)
- if tc.err {
- require.Error(t, err)
- return
- }
- require.ElementsMatch(t, tc.exp, gaps)
- })
- }
-}
-
-func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
- startTS, endTS := testDay.Bounds()
- return bloomshipper.BlockRef{
- Ref: bloomshipper.Ref{
- TenantID: "fakeTenant",
- TableName: testTable.Addr(),
- Bounds: v1.NewBounds(min, max),
- StartTimestamp: startTS,
- EndTimestamp: endTS,
- Checksum: 0,
- },
- }
-}
-
-func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
- indexBuf := bytes.NewBuffer(nil)
- bloomsBuf := bytes.NewBuffer(nil)
- writer := v1.NewMemoryBlockWriter(indexBuf, bloomsBuf)
- reader := v1.NewByteReader(indexBuf, bloomsBuf)
-
- blockOpts := v1.NewBlockOptions(compression.None, 0, 0)
-
- builder, err := v1.NewBlockBuilder(blockOpts, writer)
- if err != nil {
- return bloomshipper.Block{}, err
- }
-
- if _, err = builder.BuildFrom(iter.NewEmptyIter[v1.SeriesWithBlooms]()); err != nil {
- return bloomshipper.Block{}, err
- }
-
- block := v1.NewBlock(reader, v1.NewMetrics(nil))
-
- buf := bytes.NewBuffer(nil)
- if err := v1.TarCompress(ref.Codec, buf, block.Reader()); err != nil {
- return bloomshipper.Block{}, err
- }
-
- tarReader := bytes.NewReader(buf.Bytes())
-
- return bloomshipper.Block{
- BlockRef: ref,
- Data: bloomshipper.ClosableReadSeekerAdapter{ReadSeeker: tarReader},
- }, nil
-}
-
-func Test_blockPlansForGaps(t *testing.T) {
- for _, tc := range []struct {
- desc string
- ownershipRange v1.FingerprintBounds
- tsdbs []tsdb.SingleTenantTSDBIdentifier
- metas []bloomshipper.Meta
- err bool
- exp []blockPlan
- }{
- {
- desc: "single overlapping meta+no overlapping block",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
- metas: []bloomshipper.Meta{
- genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(11, 20)}),
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 10),
- Series: genSeries(v1.NewBounds(0, 10)),
- },
- },
- },
- },
- },
- {
- desc: "single overlapping meta+one overlapping block",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
- metas: []bloomshipper.Meta{
- genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}),
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 10),
- Series: genSeries(v1.NewBounds(0, 10)),
- Blocks: []bloomshipper.BlockRef{genBlockRef(9, 20)},
- },
- },
- },
- },
- },
- {
- // the range which needs to be generated doesn't overlap with existing blocks
- // from other tsdb versions since theres an up to date tsdb version block,
- // but we can trim the range needing generation
- desc: "trims up to date area",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
- metas: []bloomshipper.Meta{
- genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
- genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for different tsdb
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 8),
- Series: genSeries(v1.NewBounds(0, 8)),
- },
- },
- },
- },
- },
- {
- desc: "uses old block for overlapping range",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
- metas: []bloomshipper.Meta{
- genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
- genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(5, 20)}), // block for different tsdb
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 8),
- Series: genSeries(v1.NewBounds(0, 8)),
- Blocks: []bloomshipper.BlockRef{genBlockRef(5, 20)},
- },
- },
- },
- },
- },
- {
- desc: "multi case",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, // generate for both tsdbs
- metas: []bloomshipper.Meta{
- genMeta(0, 2, []int{0}, []bloomshipper.BlockRef{
- genBlockRef(0, 1),
- genBlockRef(1, 2),
- }), // tsdb_0
- genMeta(6, 8, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 8)}), // tsdb_0
-
- genMeta(3, 5, []int{1}, []bloomshipper.BlockRef{genBlockRef(3, 5)}), // tsdb_1
- genMeta(8, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(8, 10)}), // tsdb_1
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1)
- {
- Bounds: v1.NewBounds(3, 5),
- Series: genSeries(v1.NewBounds(3, 5)),
- Blocks: []bloomshipper.BlockRef{genBlockRef(3, 5)},
- },
- {
- Bounds: v1.NewBounds(9, 10),
- Series: genSeries(v1.NewBounds(9, 10)),
- Blocks: []bloomshipper.BlockRef{genBlockRef(8, 10)},
- },
- },
- },
- // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0)
- {
- tsdb: tsdbID(1),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 2),
- Series: genSeries(v1.NewBounds(0, 2)),
- Blocks: []bloomshipper.BlockRef{
- genBlockRef(0, 1),
- genBlockRef(1, 2),
- },
- },
- {
- Bounds: v1.NewBounds(6, 7),
- Series: genSeries(v1.NewBounds(6, 7)),
- Blocks: []bloomshipper.BlockRef{genBlockRef(6, 8)},
- },
- },
- },
- },
- },
- {
- desc: "dedupes block refs",
- ownershipRange: v1.NewBounds(0, 10),
- tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
- metas: []bloomshipper.Meta{
- genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{
- genBlockRef(1, 4),
- genBlockRef(9, 20),
- }), // blocks for first diff tsdb
- genMeta(5, 20, []int{2}, []bloomshipper.BlockRef{
- genBlockRef(5, 10),
- genBlockRef(9, 20), // same block references in prior meta (will be deduped)
- }), // block for second diff tsdb
- },
- exp: []blockPlan{
- {
- tsdb: tsdbID(0),
- gaps: []protos.Gap{
- {
- Bounds: v1.NewBounds(0, 10),
- Series: genSeries(v1.NewBounds(0, 10)),
- Blocks: []bloomshipper.BlockRef{
- genBlockRef(1, 4),
- genBlockRef(5, 10),
- genBlockRef(9, 20),
- },
- },
- },
- },
- },
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- // We add series spanning the whole FP ownership range
- tsdbs := make(map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries)
- for _, id := range tc.tsdbs {
- tsdbs[id] = newFakeForSeries(genSeries(tc.ownershipRange))
- }
-
- // we reuse the gapsBetweenTSDBsAndMetas function to generate the gaps as this function is tested
- // separately and it's used to generate input in our regular code path (easier to write tests this way).
- gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tsdbs, tc.metas)
- require.NoError(t, err)
-
- plans, err := blockPlansForGaps(
- context.Background(),
- "fakeTenant",
- gaps,
- tc.metas,
- )
- if tc.err {
- require.Error(t, err)
- return
- }
- require.ElementsMatch(t, tc.exp, plans)
- })
- }
-}
-
-func genSeries(bounds v1.FingerprintBounds) []*v1.Series {
- series := make([]*v1.Series, 0, int(bounds.Max-bounds.Min+1))
- for i := bounds.Min; i <= bounds.Max; i++ {
- series = append(series, &v1.Series{
- Fingerprint: i,
- Chunks: v1.ChunkRefs{
- {
- From: 0,
- Through: 1,
- Checksum: 1,
- },
- },
- })
- }
- return series
-}
-
-type fakeForSeries struct {
- series []*v1.Series
-}
-
-func newFakeForSeries(series []*v1.Series) *fakeForSeries {
- return &fakeForSeries{
- series: series,
- }
-}
-
-func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) (stop bool), _ ...*labels.Matcher) error {
- overlapping := make([]*v1.Series, 0, len(f.series))
- for _, s := range f.series {
- if ff.Match(s.Fingerprint) {
- overlapping = append(overlapping, s)
- }
- }
-
- for _, s := range overlapping {
- chunks := make([]index.ChunkMeta, 0, len(s.Chunks))
- for _, c := range s.Chunks {
- chunks = append(chunks, index.ChunkMeta{
- MinTime: int64(c.From),
- MaxTime: int64(c.Through),
- Checksum: c.Checksum,
- })
- }
-
- if fn(labels.EmptyLabels(), s.Fingerprint, chunks) {
- break
- }
- }
- return nil
-}
-
-func (f fakeForSeries) Close() error {
- return nil
-}
-
-func createTasks(n int, resultsCh chan *protos.TaskResult) []*QueueTask {
- tasks := make([]*QueueTask, 0, n)
- // Enqueue tasks
- for i := 0; i < n; i++ {
- task := NewQueueTask(
- context.Background(), time.Now(),
- protos.NewTask(config.NewDayTable(testDay, "fake"), "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil),
- resultsCh,
- )
- tasks = append(tasks, task)
- }
- return tasks
-}
-
func createPlanner(
t *testing.T,
cfg Config,
@@ -496,7 +40,7 @@ func createPlanner(
schemaCfg := config.SchemaConfig{
Configs: []config.PeriodConfig{
{
- From: parseDayTime("2023-09-01"),
+ From: plannertest.ParseDayTime("2023-09-01"),
IndexTables: config.IndexPeriodicTableConfig{
PeriodicTableConfig: config.PeriodicTableConfig{
Prefix: "index_",
@@ -720,28 +264,6 @@ func Test_BuilderLoop(t *testing.T) {
}
}
-func putMetas(bloomClient bloomshipper.Client, metas []bloomshipper.Meta) error {
- for _, meta := range metas {
- err := bloomClient.PutMeta(context.Background(), meta)
- if err != nil {
- return err
- }
-
- for _, block := range meta.Blocks {
- writtenBlock, err := genBlock(block)
- if err != nil {
- return err
- }
-
- err = bloomClient.PutBlock(context.Background(), writtenBlock)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
func Test_processTenantTaskResults(t *testing.T) {
for _, tc := range []struct {
name string
@@ -754,8 +276,8 @@ func Test_processTenantTaskResults(t *testing.T) {
{
name: "errors",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
taskResults: []*protos.TaskResult{
{
@@ -769,16 +291,16 @@ func Test_processTenantTaskResults(t *testing.T) {
},
expectedMetas: []bloomshipper.Meta{
// The original metas should remain unchanged
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
expectedTasksSucceed: 0,
},
{
name: "no new metas",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
taskResults: []*protos.TaskResult{
{
@@ -790,8 +312,8 @@ func Test_processTenantTaskResults(t *testing.T) {
},
expectedMetas: []bloomshipper.Meta{
// The original metas should remain unchanged
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
expectedTasksSucceed: 2,
},
@@ -801,58 +323,58 @@ func Test_processTenantTaskResults(t *testing.T) {
{
TaskID: "1",
CreatedMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
},
{
TaskID: "2",
CreatedMetas: []bloomshipper.Meta{
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
},
},
expectedMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
expectedTasksSucceed: 2,
},
{
name: "single meta covers all original",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}),
- genMeta(6, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 10)}),
+ plannertest.GenMeta(0, 5, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 5)}),
+ plannertest.GenMeta(6, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(6, 10)}),
},
taskResults: []*protos.TaskResult{
{
TaskID: "1",
CreatedMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
},
},
expectedMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
expectedTasksSucceed: 1,
},
{
name: "multi version ordering",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}),
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), // only part of the range is outdated, must keep
+ plannertest.GenMeta(0, 5, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 5)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}), // only part of the range is outdated, must keep
},
taskResults: []*protos.TaskResult{
{
TaskID: "1",
CreatedMetas: []bloomshipper.Meta{
- genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}),
+ plannertest.GenMeta(8, 10, []int{2}, []bloomshipper.BlockRef{plannertest.GenBlockRef(8, 10)}),
},
},
},
expectedMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(8, 10, []int{2}, []bloomshipper.BlockRef{plannertest.GenBlockRef(8, 10)}),
},
expectedTasksSucceed: 1,
},
@@ -867,11 +389,11 @@ func Test_processTenantTaskResults(t *testing.T) {
}
planner := createPlanner(t, cfg, &fakeLimits{}, logger)
- bloomClient, err := planner.bloomStore.Client(testDay.ModelTime())
+ bloomClient, err := planner.bloomStore.Client(plannertest.TestDay.ModelTime())
require.NoError(t, err)
// Create original metas and blocks
- err = putMetas(bloomClient, tc.originalMetas)
+ err = plannertest.PutMetas(bloomClient, tc.originalMetas)
require.NoError(t, err)
ctx, ctxCancel := context.WithCancel(context.Background())
@@ -885,7 +407,7 @@ func Test_processTenantTaskResults(t *testing.T) {
completed, err := planner.processTenantTaskResults(
ctx,
- testTable,
+ plannertest.TestTable,
"fakeTenant",
tc.originalMetas,
len(tc.taskResults),
@@ -898,7 +420,7 @@ func Test_processTenantTaskResults(t *testing.T) {
for _, taskResult := range tc.taskResults {
if len(taskResult.CreatedMetas) > 0 {
// Emulate builder putting new metas to obj store
- err = putMetas(bloomClient, taskResult.CreatedMetas)
+ err = plannertest.PutMetas(bloomClient, taskResult.CreatedMetas)
require.NoError(t, err)
}
@@ -913,7 +435,7 @@ func Test_processTenantTaskResults(t *testing.T) {
context.Background(),
bloomshipper.MetaSearchParams{
TenantID: "fakeTenant",
- Interval: bloomshipper.NewInterval(testTable.Bounds()),
+ Interval: bloomshipper.NewInterval(plannertest.TestTable.Bounds()),
Keyspace: v1.NewBounds(0, math.MaxUint64),
},
)
@@ -956,63 +478,63 @@ func Test_deleteOutdatedMetas(t *testing.T) {
{
name: "only up to date metas",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
newMetas: []bloomshipper.Meta{
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
expectedUpToDateMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(10, 20)}),
},
},
{
name: "outdated metas",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}),
+ plannertest.GenMeta(0, 5, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 5)}),
},
newMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
expectedUpToDateMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ plannertest.GenMeta(0, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(0, 10)}),
},
},
{
name: "new metas reuse blocks from outdated meta",
originalMetas: []bloomshipper.Meta{
- genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{ // Outdated
- genBlockRef(0, 5), // Reuse
- genBlockRef(5, 10), // Delete
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{ // Outdated
+ plannertest.GenBlockRef(0, 5), // Reuse
+ plannertest.GenBlockRef(5, 10), // Delete
}),
- genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{ // Outdated
- genBlockRef(10, 20), // Reuse
+ plannertest.GenMeta(10, 20, []int{0}, []bloomshipper.BlockRef{ // Outdated
+ plannertest.GenBlockRef(10, 20), // Reuse
}),
- genMeta(20, 30, []int{0}, []bloomshipper.BlockRef{ // Up to date
- genBlockRef(20, 30),
+ plannertest.GenMeta(20, 30, []int{0}, []bloomshipper.BlockRef{ // Up to date
+ plannertest.GenBlockRef(20, 30),
}),
},
newMetas: []bloomshipper.Meta{
- genMeta(0, 5, []int{1}, []bloomshipper.BlockRef{
- genBlockRef(0, 5), // Reused block
+ plannertest.GenMeta(0, 5, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 5), // Reused block
}),
- genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{
- genBlockRef(5, 7), // New block
- genBlockRef(7, 10), // New block
- genBlockRef(10, 20), // Reused block
+ plannertest.GenMeta(5, 20, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(5, 7), // New block
+ plannertest.GenBlockRef(7, 10), // New block
+ plannertest.GenBlockRef(10, 20), // Reused block
}),
},
expectedUpToDateMetas: []bloomshipper.Meta{
- genMeta(0, 5, []int{1}, []bloomshipper.BlockRef{
- genBlockRef(0, 5),
+ plannertest.GenMeta(0, 5, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 5),
}),
- genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{
- genBlockRef(5, 7),
- genBlockRef(7, 10),
- genBlockRef(10, 20),
+ plannertest.GenMeta(5, 20, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(5, 7),
+ plannertest.GenBlockRef(7, 10),
+ plannertest.GenBlockRef(10, 20),
}),
- genMeta(20, 30, []int{0}, []bloomshipper.BlockRef{
- genBlockRef(20, 30),
+ plannertest.GenMeta(20, 30, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(20, 30),
}),
},
},
@@ -1027,13 +549,13 @@ func Test_deleteOutdatedMetas(t *testing.T) {
}
planner := createPlanner(t, cfg, &fakeLimits{}, logger)
- bloomClient, err := planner.bloomStore.Client(testDay.ModelTime())
+ bloomClient, err := planner.bloomStore.Client(plannertest.TestDay.ModelTime())
require.NoError(t, err)
// Create original/new metas and blocks
- err = putMetas(bloomClient, tc.originalMetas)
+ err = plannertest.PutMetas(bloomClient, tc.originalMetas)
require.NoError(t, err)
- err = putMetas(bloomClient, tc.newMetas)
+ err = plannertest.PutMetas(bloomClient, tc.newMetas)
require.NoError(t, err)
// Get all metas
@@ -1041,7 +563,7 @@ func Test_deleteOutdatedMetas(t *testing.T) {
context.Background(),
bloomshipper.MetaSearchParams{
TenantID: "fakeTenant",
- Interval: bloomshipper.NewInterval(testTable.Bounds()),
+ Interval: bloomshipper.NewInterval(plannertest.TestTable.Bounds()),
Keyspace: v1.NewBounds(0, math.MaxUint64),
},
)
@@ -1049,7 +571,7 @@ func Test_deleteOutdatedMetas(t *testing.T) {
removeLocFromMetasSources(metas)
require.ElementsMatch(t, append(tc.originalMetas, tc.newMetas...), metas)
- upToDate, err := planner.deleteOutdatedMetasAndBlocks(context.Background(), testTable, "fakeTenant", tc.newMetas, tc.originalMetas, phasePlanning)
+ upToDate, err := planner.deleteOutdatedMetasAndBlocks(context.Background(), plannertest.TestTable, "fakeTenant", tc.newMetas, tc.originalMetas, phasePlanning)
require.NoError(t, err)
require.ElementsMatch(t, tc.expectedUpToDateMetas, upToDate)
@@ -1058,7 +580,7 @@ func Test_deleteOutdatedMetas(t *testing.T) {
context.Background(),
bloomshipper.MetaSearchParams{
TenantID: "fakeTenant",
- Interval: bloomshipper.NewInterval(testTable.Bounds()),
+ Interval: bloomshipper.NewInterval(plannertest.TestTable.Bounds()),
Keyspace: v1.NewBounds(0, math.MaxUint64),
},
)
@@ -1190,6 +712,20 @@ func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) {
}, nil
}
+func createTasks(n int, resultsCh chan *protos.TaskResult) []*QueueTask {
+ tasks := make([]*QueueTask, 0, n)
+ // Enqueue tasks
+ for i := 0; i < n; i++ {
+ task := NewQueueTask(
+ context.Background(), time.Now(),
+ protos.NewTask(config.NewDayTable(plannertest.TestDay, "fake"), "fakeTenant", v1.NewBounds(0, 10), plannertest.TsdbID(1), nil),
+ resultsCh,
+ )
+ tasks = append(tasks, task)
+ }
+ return tasks
+}
+
type fakeLimits struct {
Limits
timeout time.Duration
@@ -1216,26 +752,10 @@ func (f *fakeLimits) BloomTaskMaxRetries(_ string) int {
return f.maxRetries
}
-func parseDayTime(s string) config.DayTime {
- t, err := time.Parse("2006-01-02", s)
- if err != nil {
- panic(err)
- }
- return config.DayTime{
- Time: model.TimeFromUnix(t.Unix()),
- }
+func (f *fakeLimits) BloomPlanningStrategy(_ string) string {
+ return strategies.SplitBySeriesChunkSizeStrategyName
}
-type DummyReadSeekCloser struct{}
-
-func (d *DummyReadSeekCloser) Read(_ []byte) (n int, err error) {
- return 0, io.EOF
-}
-
-func (d *DummyReadSeekCloser) Seek(_ int64, _ int) (int64, error) {
- return 0, nil
-}
-
-func (d *DummyReadSeekCloser) Close() error {
- return nil
+func (f *fakeLimits) BloomTaskTargetSeriesChunksSizeBytes(_ string) uint64 {
+ return 1 << 20 // 1MB
}
diff --git a/pkg/bloombuild/planner/plannertest/utils.go b/pkg/bloombuild/planner/plannertest/utils.go
new file mode 100644
index 0000000000000..706e0abdf00a7
--- /dev/null
+++ b/pkg/bloombuild/planner/plannertest/utils.go
@@ -0,0 +1,141 @@
+package plannertest
+
+import (
+ "bytes"
+ "context"
+ "time"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/v3/pkg/compression"
+ v2 "github.com/grafana/loki/v3/pkg/iter/v2"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+var TestDay = ParseDayTime("2023-09-01")
+var TestTable = config.NewDayTable(TestDay, "index_")
+
+func TsdbID(n int) tsdb.SingleTenantTSDBIdentifier {
+ return tsdb.SingleTenantTSDBIdentifier{
+ TS: time.Unix(int64(n), 0),
+ }
+}
+
+func GenMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta {
+ m := bloomshipper.Meta{
+ MetaRef: bloomshipper.MetaRef{
+ Ref: bloomshipper.Ref{
+ TenantID: "fakeTenant",
+ TableName: TestTable.Addr(),
+ Bounds: v1.NewBounds(min, max),
+ },
+ },
+ Blocks: blocks,
+ }
+ for _, source := range sources {
+ m.Sources = append(m.Sources, TsdbID(source))
+ }
+ return m
+}
+
+func GenBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
+ startTS, endTS := TestDay.Bounds()
+ return bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ TenantID: "fakeTenant",
+ TableName: TestTable.Addr(),
+ Bounds: v1.NewBounds(min, max),
+ StartTimestamp: startTS,
+ EndTimestamp: endTS,
+ Checksum: 0,
+ },
+ }
+}
+
+func GenBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
+ indexBuf := bytes.NewBuffer(nil)
+ bloomsBuf := bytes.NewBuffer(nil)
+ writer := v1.NewMemoryBlockWriter(indexBuf, bloomsBuf)
+ reader := v1.NewByteReader(indexBuf, bloomsBuf)
+
+ blockOpts := v1.NewBlockOptions(compression.None, 0, 0)
+
+ builder, err := v1.NewBlockBuilder(blockOpts, writer)
+ if err != nil {
+ return bloomshipper.Block{}, err
+ }
+
+ if _, err = builder.BuildFrom(v2.NewEmptyIter[v1.SeriesWithBlooms]()); err != nil {
+ return bloomshipper.Block{}, err
+ }
+
+ block := v1.NewBlock(reader, v1.NewMetrics(nil))
+
+ buf := bytes.NewBuffer(nil)
+ if err := v1.TarCompress(ref.Codec, buf, block.Reader()); err != nil {
+ return bloomshipper.Block{}, err
+ }
+
+ tarReader := bytes.NewReader(buf.Bytes())
+
+ return bloomshipper.Block{
+ BlockRef: ref,
+ Data: bloomshipper.ClosableReadSeekerAdapter{ReadSeeker: tarReader},
+ }, nil
+}
+
+func GenSeries(bounds v1.FingerprintBounds) []*v1.Series {
+ return GenSeriesWithStep(bounds, 1)
+}
+
+func GenSeriesWithStep(bounds v1.FingerprintBounds, step int) []*v1.Series {
+ series := make([]*v1.Series, 0, int(bounds.Max-bounds.Min+1)/step)
+ for i := bounds.Min; i <= bounds.Max; i += model.Fingerprint(step) {
+ series = append(series, &v1.Series{
+ Fingerprint: i,
+ Chunks: v1.ChunkRefs{
+ {
+ From: 0,
+ Through: 1,
+ Checksum: 1,
+ },
+ },
+ })
+ }
+ return series
+}
+
+func PutMetas(bloomClient bloomshipper.Client, metas []bloomshipper.Meta) error {
+ for _, meta := range metas {
+ err := bloomClient.PutMeta(context.Background(), meta)
+ if err != nil {
+ return err
+ }
+
+ for _, block := range meta.Blocks {
+ writtenBlock, err := GenBlock(block)
+ if err != nil {
+ return err
+ }
+
+ err = bloomClient.PutBlock(context.Background(), writtenBlock)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func ParseDayTime(s string) config.DayTime {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ panic(err)
+ }
+ return config.DayTime{
+ Time: model.TimeFromUnix(t.Unix()),
+ }
+}
diff --git a/pkg/bloombuild/planner/retention_test.go b/pkg/bloombuild/planner/retention_test.go
index 6738ac336e749..a309a7fc53013 100644
--- a/pkg/bloombuild/planner/retention_test.go
+++ b/pkg/bloombuild/planner/retention_test.go
@@ -11,6 +11,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -23,7 +24,7 @@ import (
"github.com/grafana/loki/v3/pkg/validation"
)
-var testTime = parseDayTime("2024-12-31").ModelTime()
+var testTime = plannertest.ParseDayTime("2024-12-31").ModelTime()
func TestRetention(t *testing.T) {
for _, tc := range []struct {
diff --git a/pkg/bloombuild/planner/strategies/chunksize.go b/pkg/bloombuild/planner/strategies/chunksize.go
new file mode 100644
index 0000000000000..21f473908dd99
--- /dev/null
+++ b/pkg/bloombuild/planner/strategies/chunksize.go
@@ -0,0 +1,286 @@
+package strategies
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "sort"
+
+ "github.com/dustin/go-humanize"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/protos"
+ iter "github.com/grafana/loki/v3/pkg/iter/v2"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+)
+
+type ChunkSizeStrategyLimits interface {
+ BloomTaskTargetSeriesChunksSizeBytes(tenantID string) uint64
+}
+
+type ChunkSizeStrategy struct {
+ limits ChunkSizeStrategyLimits
+ logger log.Logger
+}
+
+func NewChunkSizeStrategy(
+ limits ChunkSizeStrategyLimits,
+ logger log.Logger,
+) (*ChunkSizeStrategy, error) {
+ return &ChunkSizeStrategy{
+ limits: limits,
+ logger: logger,
+ }, nil
+}
+
+func (s *ChunkSizeStrategy) Name() string {
+ return SplitBySeriesChunkSizeStrategyName
+}
+
+func (s *ChunkSizeStrategy) Plan(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ tsdbs TSDBSet,
+ metas []bloomshipper.Meta,
+) ([]*protos.Task, error) {
+ targetTaskSize := s.limits.BloomTaskTargetSeriesChunksSizeBytes(tenant)
+
+ logger := log.With(s.logger, "table", table.Addr(), "tenant", tenant)
+ level.Debug(s.logger).Log("msg", "loading work for tenant", "target task size", humanize.Bytes(targetTaskSize))
+
+ // Determine which TSDBs have gaps and need to be processed.
+ tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(v1.NewBounds(0, math.MaxUint64), tsdbs, metas)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to find gaps", "err", err)
+ return nil, fmt.Errorf("failed to find gaps: %w", err)
+ }
+
+ if len(tsdbsWithGaps) == 0 {
+ level.Debug(logger).Log("msg", "blooms exist for all tsdbs")
+ return nil, nil
+ }
+
+ sizedIter, iterSize, err := s.sizedSeriesIter(ctx, tenant, tsdbsWithGaps, targetTaskSize)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get sized series iter: %w", err)
+ }
+
+ tasks := make([]*protos.Task, 0, iterSize)
+ for sizedIter.Next() {
+ series := sizedIter.At()
+ if series.Len() == 0 {
+ // This should never happen, but just in case.
+ level.Warn(logger).Log("msg", "got empty series batch", "tsdb", series.TSDB().Name())
+ continue
+ }
+
+ bounds := series.Bounds()
+
+ blocks, err := getBlocksMatchingBounds(metas, bounds)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get blocks matching bounds: %w", err)
+ }
+
+ planGap := protos.Gap{
+ Bounds: bounds,
+ Series: series.V1Series(),
+ Blocks: blocks,
+ }
+
+ tasks = append(tasks, protos.NewTask(table, tenant, bounds, series.TSDB(), []protos.Gap{planGap}))
+ }
+ if err := sizedIter.Err(); err != nil {
+ return nil, fmt.Errorf("failed to iterate over sized series: %w", err)
+ }
+
+ return tasks, nil
+}
+
+func getBlocksMatchingBounds(metas []bloomshipper.Meta, bounds v1.FingerprintBounds) ([]bloomshipper.BlockRef, error) {
+ blocks := make([]bloomshipper.BlockRef, 0, 10)
+
+ for _, meta := range metas {
+ if meta.Bounds.Intersection(bounds) == nil {
+ // this meta doesn't overlap the gap, skip
+ continue
+ }
+
+ for _, block := range meta.Blocks {
+ if block.Bounds.Intersection(bounds) == nil {
+ // this block doesn't overlap the gap, skip
+ continue
+ }
+ // this block overlaps the gap, add it to the plan
+ // for this gap
+ blocks = append(blocks, block)
+ }
+ }
+
+ // ensure we sort blocks so deduping iterator works as expected
+ sort.Slice(blocks, func(i, j int) bool {
+ return blocks[i].Bounds.Less(blocks[j].Bounds)
+ })
+
+ peekingBlocks := iter.NewPeekIter(
+ iter.NewSliceIter(
+ blocks,
+ ),
+ )
+
+ // dedupe blocks which could be in multiple metas
+ itr := iter.NewDedupingIter(
+ func(a, b bloomshipper.BlockRef) bool {
+ return a == b
+ },
+ iter.Identity[bloomshipper.BlockRef],
+ func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef {
+ return a
+ },
+ peekingBlocks,
+ )
+
+ deduped, err := iter.Collect(itr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to dedupe blocks: %w", err)
+ }
+
+ return deduped, nil
+}
+
+type seriesWithChunks struct {
+ tsdb tsdb.SingleTenantTSDBIdentifier
+ fp model.Fingerprint
+ chunks []index.ChunkMeta
+}
+
+type seriesBatch struct {
+ series []seriesWithChunks
+ size uint64
+}
+
+func newSeriesBatch() seriesBatch {
+ return seriesBatch{
+ series: make([]seriesWithChunks, 0, 100),
+ }
+}
+
+func (b *seriesBatch) Bounds() v1.FingerprintBounds {
+ if len(b.series) == 0 {
+ return v1.NewBounds(0, 0)
+ }
+
+ // We assume that the series are sorted by fingerprint.
+ // This is guaranteed since series are iterated in order by the TSDB.
+ return v1.NewBounds(b.series[0].fp, b.series[len(b.series)-1].fp)
+}
+
+func (b *seriesBatch) V1Series() []*v1.Series {
+ series := make([]*v1.Series, 0, len(b.series))
+ for _, s := range b.series {
+ res := &v1.Series{
+ Fingerprint: s.fp,
+ Chunks: make(v1.ChunkRefs, 0, len(s.chunks)),
+ }
+ for _, chk := range s.chunks {
+ res.Chunks = append(res.Chunks, v1.ChunkRef{
+ From: model.Time(chk.MinTime),
+ Through: model.Time(chk.MaxTime),
+ Checksum: chk.Checksum,
+ })
+ }
+
+ series = append(series, res)
+ }
+
+ return series
+}
+
+func (b *seriesBatch) Append(s seriesWithChunks, size uint64) {
+ b.series = append(b.series, s)
+ b.size += size
+}
+
+func (b *seriesBatch) Len() int {
+ return len(b.series)
+}
+
+func (b *seriesBatch) Size() uint64 {
+ return b.size
+}
+
+func (b *seriesBatch) TSDB() tsdb.SingleTenantTSDBIdentifier {
+ if len(b.series) == 0 {
+ return tsdb.SingleTenantTSDBIdentifier{}
+ }
+ return b.series[0].tsdb
+}
+
+func (s *ChunkSizeStrategy) sizedSeriesIter(
+ ctx context.Context,
+ tenant string,
+ tsdbsWithGaps []tsdbGaps,
+ targetTaskSizeBytes uint64,
+) (iter.Iterator[seriesBatch], int, error) {
+ batches := make([]seriesBatch, 0, 100)
+ currentBatch := newSeriesBatch()
+
+ for _, idx := range tsdbsWithGaps {
+ for _, gap := range idx.gaps {
+ if err := idx.tsdb.ForSeries(
+ ctx,
+ tenant,
+ gap,
+ 0, math.MaxInt64,
+ func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ var seriesSize uint64
+ for _, chk := range chks {
+ seriesSize += uint64(chk.KB * 1024)
+ }
+
+ // Cut a new batch IF the current batch is not empty (so we add at least one series to the batch)
+ // AND Adding this series to the batch would exceed the target task size.
+ if currentBatch.Len() > 0 && currentBatch.Size()+seriesSize > targetTaskSizeBytes {
+ batches = append(batches, currentBatch)
+ currentBatch = newSeriesBatch()
+ }
+
+ currentBatch.Append(seriesWithChunks{
+ tsdb: idx.tsdbIdentifier,
+ fp: fp,
+ chunks: chks,
+ }, seriesSize)
+ return false
+ }
+ },
+ labels.MustNewMatcher(labels.MatchEqual, "", ""),
+ ); err != nil {
+ return nil, 0, err
+ }
+
+ // Add the last batch for this TSDB if it's not empty.
+ if currentBatch.Len() > 0 {
+ batches = append(batches, currentBatch)
+ currentBatch = newSeriesBatch()
+ }
+ }
+ }
+
+ select {
+ case <-ctx.Done():
+ return iter.NewEmptyIter[seriesBatch](), 0, ctx.Err()
+ default:
+ return iter.NewCancelableIter[seriesBatch](ctx, iter.NewSliceIter[seriesBatch](batches)), len(batches), nil
+ }
+}
diff --git a/pkg/bloombuild/planner/strategies/chunksize_test.go b/pkg/bloombuild/planner/strategies/chunksize_test.go
new file mode 100644
index 0000000000000..951d033e5c100
--- /dev/null
+++ b/pkg/bloombuild/planner/strategies/chunksize_test.go
@@ -0,0 +1,248 @@
+package strategies
+
+import (
+ "context"
+ "testing"
+
+ "github.com/go-kit/log"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest"
+ "github.com/grafana/loki/v3/pkg/bloombuild/protos"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+func taskForGap(tsdb tsdb.SingleTenantTSDBIdentifier, bounds v1.FingerprintBounds, blocks []bloomshipper.BlockRef) *protos.Task {
+ return protos.NewTask(plannertest.TestTable, "fake", bounds, tsdb, []protos.Gap{
+ {
+ Bounds: bounds,
+ Series: plannertest.GenSeriesWithStep(bounds, 10),
+ Blocks: blocks,
+ },
+ })
+}
+
+func Test_ChunkSizeStrategy_Plan(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ limits ChunkSizeStrategyLimits
+ originalMetas []bloomshipper.Meta
+ tsdbs TSDBSet
+ expectedTasks []*protos.Task
+ }{
+ {
+ name: "no previous blocks and metas",
+ limits: fakeChunkSizeLimits{TargetSize: 200 * 1 << 10}, // 2 series (100KB each) per task
+
+ // Each series will have 1 chunk of 100KB each
+ tsdbs: TSDBSet{
+ plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series
+ },
+
+ // We expect 5 tasks, each with 2 series each
+ expectedTasks: []*protos.Task{
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(0, 10), nil),
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(20, 30), nil),
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(40, 50), nil),
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(60, 70), nil),
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(80, 90), nil),
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(100, 100), nil),
+ },
+ },
+ {
+ name: "previous metas with no gaps",
+ limits: fakeChunkSizeLimits{TargetSize: 200 * 1 << 10},
+
+ // Original metas cover the entire range
+ // One meta for each 2 series w/ 1 block per series
+ originalMetas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ }),
+ plannertest.GenMeta(20, 30, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(20, 20),
+ plannertest.GenBlockRef(30, 30),
+ }),
+ plannertest.GenMeta(40, 50, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(40, 40),
+ plannertest.GenBlockRef(50, 50),
+ }),
+ plannertest.GenMeta(60, 70, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(60, 60),
+ plannertest.GenBlockRef(70, 70),
+ }),
+ plannertest.GenMeta(80, 90, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(80, 80),
+ plannertest.GenBlockRef(90, 90),
+ }),
+ plannertest.GenMeta(100, 100, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(100, 100),
+ }),
+ },
+
+ tsdbs: TSDBSet{
+ plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series
+ },
+
+ // We expect no tasks
+ expectedTasks: []*protos.Task{},
+ },
+ {
+ name: "Original metas do not cover the entire range",
+ limits: fakeChunkSizeLimits{TargetSize: 200 * 1 << 10},
+
+ // Original metas cover only part of the range
+ // Original metas cover the entire range
+ // One meta for each 2 series w/ 1 block per series
+ originalMetas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 10, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ }),
+ // Missing meta for 20-30
+ plannertest.GenMeta(40, 50, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(40, 40),
+ plannertest.GenBlockRef(50, 50),
+ }),
+ plannertest.GenMeta(60, 70, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(60, 60),
+ plannertest.GenBlockRef(70, 70),
+ }),
+ plannertest.GenMeta(80, 90, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(80, 80),
+ plannertest.GenBlockRef(90, 90),
+ }),
+ plannertest.GenMeta(100, 100, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(100, 100),
+ }),
+ },
+
+ tsdbs: TSDBSet{
+ plannertest.TsdbID(0): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series
+ },
+
+ // We expect 1 tasks for the missing series
+ expectedTasks: []*protos.Task{
+ taskForGap(plannertest.TsdbID(0), v1.NewBounds(20, 30), nil),
+ },
+ },
+ {
+ name: "All metas are outdated",
+ limits: fakeChunkSizeLimits{TargetSize: 200 * 1 << 10},
+
+ originalMetas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 100, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ plannertest.GenBlockRef(20, 20),
+ plannertest.GenBlockRef(30, 30),
+ plannertest.GenBlockRef(40, 40),
+ plannertest.GenBlockRef(50, 50),
+ plannertest.GenBlockRef(60, 60),
+ plannertest.GenBlockRef(70, 70),
+ plannertest.GenBlockRef(80, 80),
+ plannertest.GenBlockRef(90, 90),
+ plannertest.GenBlockRef(100, 100),
+ }),
+ },
+
+ tsdbs: TSDBSet{
+ plannertest.TsdbID(1): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series
+ },
+
+ // We expect 5 tasks, each with 2 series each
+ expectedTasks: []*protos.Task{
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(0, 10), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(20, 30), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(20, 20),
+ plannertest.GenBlockRef(30, 30),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(40, 50), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(40, 40),
+ plannertest.GenBlockRef(50, 50),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(60, 70), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(60, 60),
+ plannertest.GenBlockRef(70, 70),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(80, 90), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(80, 80),
+ plannertest.GenBlockRef(90, 90),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(100, 100), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(100, 100),
+ }),
+ },
+ },
+ {
+ name: "Some metas are outdated",
+ limits: fakeChunkSizeLimits{TargetSize: 200 * 1 << 10},
+
+ originalMetas: []bloomshipper.Meta{
+ // Outdated meta
+ plannertest.GenMeta(0, 49, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ plannertest.GenBlockRef(20, 20),
+ plannertest.GenBlockRef(30, 30),
+ plannertest.GenBlockRef(40, 40),
+ }),
+ // Updated meta
+ plannertest.GenMeta(50, 100, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(50, 50),
+ plannertest.GenBlockRef(60, 60),
+ plannertest.GenBlockRef(70, 70),
+ plannertest.GenBlockRef(80, 80),
+ plannertest.GenBlockRef(90, 90),
+ plannertest.GenBlockRef(100, 100),
+ }),
+ },
+
+ tsdbs: TSDBSet{
+ plannertest.TsdbID(1): newFakeForSeries(plannertest.GenSeriesWithStep(v1.NewBounds(0, 100), 10)), // 10 series
+ },
+
+ // We expect 5 tasks, each with 2 series each
+ expectedTasks: []*protos.Task{
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(0, 10), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 0),
+ plannertest.GenBlockRef(10, 10),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(20, 30), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(20, 20),
+ plannertest.GenBlockRef(30, 30),
+ }),
+ taskForGap(plannertest.TsdbID(1), v1.NewBounds(40, 40), []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(40, 40),
+ }),
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ logger := log.NewNopLogger()
+ //logger := log.NewLogfmtLogger(os.Stdout)
+
+ strategy, err := NewChunkSizeStrategy(tc.limits, logger)
+ require.NoError(t, err)
+
+ actual, err := strategy.Plan(context.Background(), plannertest.TestTable, "fake", tc.tsdbs, tc.originalMetas)
+ require.NoError(t, err)
+
+ require.ElementsMatch(t, tc.expectedTasks, actual)
+ })
+ }
+}
+
+type fakeChunkSizeLimits struct {
+ TargetSize uint64
+}
+
+func (f fakeChunkSizeLimits) BloomTaskTargetSeriesChunksSizeBytes(_ string) uint64 {
+ return f.TargetSize
+}
diff --git a/pkg/bloombuild/planner/strategies/factory.go b/pkg/bloombuild/planner/strategies/factory.go
new file mode 100644
index 0000000000000..f58f91e51708d
--- /dev/null
+++ b/pkg/bloombuild/planner/strategies/factory.go
@@ -0,0 +1,50 @@
+package strategies
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-kit/log"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/common"
+ "github.com/grafana/loki/v3/pkg/bloombuild/protos"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+const (
+ SplitKeyspaceStrategyName = "split_keyspace_by_factor"
+ SplitBySeriesChunkSizeStrategyName = "split_by_series_chunks_size"
+)
+
+type Limits interface {
+ BloomPlanningStrategy(tenantID string) string
+ SplitKeyspaceStrategyLimits
+ ChunkSizeStrategyLimits
+}
+
+type TSDBSet = map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries
+
+type PlanningStrategy interface {
+ Name() string
+ // Plan returns a set of tasks for a given tenant-table tuple and TSDBs.
+ Plan(ctx context.Context, table config.DayTable, tenant string, tsdbs TSDBSet, metas []bloomshipper.Meta) ([]*protos.Task, error)
+}
+
+func NewStrategy(
+ tenantID string,
+ limits Limits,
+ logger log.Logger,
+) (PlanningStrategy, error) {
+ strategy := limits.BloomPlanningStrategy(tenantID)
+
+ switch strategy {
+ case SplitKeyspaceStrategyName:
+ return NewSplitKeyspaceStrategy(limits, logger)
+ case SplitBySeriesChunkSizeStrategyName:
+ return NewChunkSizeStrategy(limits, logger)
+ default:
+ return nil, fmt.Errorf("unknown bloom planning strategy (%s)", strategy)
+ }
+}
diff --git a/pkg/bloombuild/planner/strategies/splitkeyspace.go b/pkg/bloombuild/planner/strategies/splitkeyspace.go
new file mode 100644
index 0000000000000..2e799d1ed4903
--- /dev/null
+++ b/pkg/bloombuild/planner/strategies/splitkeyspace.go
@@ -0,0 +1,207 @@
+package strategies
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/common"
+ "github.com/grafana/loki/v3/pkg/bloombuild/protos"
+ iter "github.com/grafana/loki/v3/pkg/iter/v2"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+type SplitKeyspaceStrategyLimits interface {
+ BloomSplitSeriesKeyspaceBy(tenantID string) int
+}
+
+type SplitKeyspaceStrategy struct {
+ limits SplitKeyspaceStrategyLimits
+ logger log.Logger
+}
+
+func NewSplitKeyspaceStrategy(
+ limits SplitKeyspaceStrategyLimits,
+ logger log.Logger,
+) (*SplitKeyspaceStrategy, error) {
+ return &SplitKeyspaceStrategy{
+ limits: limits,
+ logger: logger,
+ }, nil
+}
+
+func (s *SplitKeyspaceStrategy) Name() string {
+ return SplitKeyspaceStrategyName
+}
+
+func (s *SplitKeyspaceStrategy) Plan(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ tsdbs TSDBSet,
+ metas []bloomshipper.Meta,
+) ([]*protos.Task, error) {
+ splitFactor := s.limits.BloomSplitSeriesKeyspaceBy(tenant)
+ ownershipRanges := SplitFingerprintKeyspaceByFactor(splitFactor)
+
+ logger := log.With(s.logger, "table", table.Addr(), "tenant", tenant)
+ level.Debug(s.logger).Log("msg", "loading work for tenant", "splitFactor", splitFactor)
+
+ var tasks []*protos.Task
+ for _, ownershipRange := range ownershipRanges {
+ logger := log.With(logger, "ownership", ownershipRange.String())
+
+ // Filter only the metas that overlap in the ownership range
+ metasInBounds := bloomshipper.FilterMetasOverlappingBounds(metas, ownershipRange)
+
+ // Find gaps in the TSDBs for this tenant/table
+ gaps, err := s.findOutdatedGaps(ctx, tenant, tsdbs, ownershipRange, metasInBounds, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to find outdated gaps", "err", err)
+ continue
+ }
+
+ for _, gap := range gaps {
+ tasks = append(tasks, protos.NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps))
+ }
+ }
+
+ return tasks, nil
+}
+
+// blockPlan is a plan for all the work needed to build a meta.json
+// It includes:
+// - the tsdb (source of truth) which contains all the series+chunks
+// we need to ensure are indexed in bloom blocks
+// - a list of gaps that are out of date and need to be checked+built
+// - within each gap, a list of block refs which overlap the gap are included
+// so we can use them to accelerate bloom generation. They likely contain many
+// of the same chunks we need to ensure are indexed, just from previous tsdb iterations.
+// This is a performance optimization to avoid expensive re-reindexing
+type blockPlan struct {
+ tsdb tsdb.SingleTenantTSDBIdentifier
+ gaps []protos.Gap
+}
+
+func (s *SplitKeyspaceStrategy) findOutdatedGaps(
+ ctx context.Context,
+ tenant string,
+ tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries,
+ ownershipRange v1.FingerprintBounds,
+ metas []bloomshipper.Meta,
+ logger log.Logger,
+) ([]blockPlan, error) {
+ // Determine which TSDBs have gaps in the ownership range and need to
+ // be processed.
+ tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, tsdbs, metas)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to find gaps", "err", err)
+ return nil, fmt.Errorf("failed to find gaps: %w", err)
+ }
+
+ if len(tsdbsWithGaps) == 0 {
+ level.Debug(logger).Log("msg", "blooms exist for all tsdbs")
+ return nil, nil
+ }
+
+ work, err := blockPlansForGaps(ctx, tenant, tsdbsWithGaps, metas)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to create plan", "err", err)
+ return nil, fmt.Errorf("failed to create plan: %w", err)
+ }
+
+ return work, nil
+}
+
+// Used to signal the gaps that need to be populated for a tsdb
+type tsdbGaps struct {
+ tsdbIdentifier tsdb.SingleTenantTSDBIdentifier
+ tsdb common.ClosableForSeries
+ gaps []v1.FingerprintBounds
+}
+
+// gapsBetweenTSDBsAndMetas returns if the metas are up-to-date with the TSDBs. This is determined by asserting
+// that for each TSDB, there are metas covering the entire ownership range which were generated from that specific TSDB.
+func gapsBetweenTSDBsAndMetas(
+ ownershipRange v1.FingerprintBounds,
+ tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries,
+ metas []bloomshipper.Meta,
+) (res []tsdbGaps, err error) {
+ for db, tsdb := range tsdbs {
+ id := db.Name()
+
+ relevantMetas := make([]v1.FingerprintBounds, 0, len(metas))
+ for _, meta := range metas {
+ for _, s := range meta.Sources {
+ if s.Name() == id {
+ relevantMetas = append(relevantMetas, meta.Bounds)
+ }
+ }
+ }
+
+ gaps, err := FindGapsInFingerprintBounds(ownershipRange, relevantMetas)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(gaps) > 0 {
+ res = append(res, tsdbGaps{
+ tsdbIdentifier: db,
+ tsdb: tsdb,
+ gaps: gaps,
+ })
+ }
+ }
+
+ return res, err
+}
+
+// blockPlansForGaps groups tsdb gaps we wish to fill with overlapping but out of date blocks.
+// This allows us to expedite bloom generation by using existing blocks to fill in the gaps
+// since many will contain the same chunks.
+func blockPlansForGaps(
+ ctx context.Context,
+ tenant string,
+ tsdbs []tsdbGaps,
+ metas []bloomshipper.Meta,
+) ([]blockPlan, error) {
+ plans := make([]blockPlan, 0, len(tsdbs))
+
+ for _, idx := range tsdbs {
+ plan := blockPlan{
+ tsdb: idx.tsdbIdentifier,
+ gaps: make([]protos.Gap, 0, len(idx.gaps)),
+ }
+
+ for _, gap := range idx.gaps {
+ planGap := protos.Gap{
+ Bounds: gap,
+ }
+
+ seriesItr, err := common.NewTSDBSeriesIter(ctx, tenant, idx.tsdb, gap)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load series from TSDB for gap (%s): %w", gap.String(), err)
+ }
+ planGap.Series, err = iter.Collect(seriesItr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to collect series: %w", err)
+ }
+
+ planGap.Blocks, err = getBlocksMatchingBounds(metas, gap)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get blocks matching bounds: %w", err)
+ }
+
+ plan.gaps = append(plan.gaps, planGap)
+ }
+
+ plans = append(plans, plan)
+ }
+
+ return plans, nil
+}
diff --git a/pkg/bloombuild/planner/strategies/splitkeyspace_test.go b/pkg/bloombuild/planner/strategies/splitkeyspace_test.go
new file mode 100644
index 0000000000000..18480d74c98fc
--- /dev/null
+++ b/pkg/bloombuild/planner/strategies/splitkeyspace_test.go
@@ -0,0 +1,364 @@
+package strategies
+
+import (
+ "context"
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/bloombuild/common"
+ "github.com/grafana/loki/v3/pkg/bloombuild/planner/plannertest"
+ "github.com/grafana/loki/v3/pkg/bloombuild/protos"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+)
+
+func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ err bool
+ exp []tsdbGaps
+ ownershipRange v1.FingerprintBounds
+ tsdbs map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries
+ metas []bloomshipper.Meta
+ }{
+ {
+ desc: "non-overlapping tsdbs and metas",
+ err: true,
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
+ plannertest.TsdbID(0): nil,
+ },
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(11, 20, []int{0}, nil),
+ },
+ },
+ {
+ desc: "single tsdb",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
+ plannertest.TsdbID(0): nil,
+ },
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(4, 8, []int{0}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdbIdentifier: plannertest.TsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 3),
+ v1.NewBounds(9, 10),
+ },
+ },
+ },
+ },
+ {
+ desc: "multiple tsdbs with separate blocks",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
+ plannertest.TsdbID(0): nil,
+ plannertest.TsdbID(1): nil,
+ },
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 5, []int{0}, nil),
+ plannertest.GenMeta(6, 10, []int{1}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdbIdentifier: plannertest.TsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ tsdbIdentifier: plannertest.TsdbID(1),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ },
+ },
+ },
+ },
+ {
+ desc: "multiple tsdbs with the same blocks",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries{
+ plannertest.TsdbID(0): nil,
+ plannertest.TsdbID(1): nil,
+ },
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 5, []int{0, 1}, nil),
+ plannertest.GenMeta(6, 8, []int{1}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdbIdentifier: plannertest.TsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ tsdbIdentifier: plannertest.TsdbID(1),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(9, 10),
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas)
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.ElementsMatch(t, tc.exp, gaps)
+ })
+ }
+}
+
+func Test_blockPlansForGaps(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ ownershipRange v1.FingerprintBounds
+ tsdbs []tsdb.SingleTenantTSDBIdentifier
+ metas []bloomshipper.Meta
+ err bool
+ exp []blockPlan
+ }{
+ {
+ desc: "single overlapping meta+no overlapping block",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0)},
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(5, 20, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(11, 20)}),
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 10),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 10)),
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "single overlapping meta+one overlapping block",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0)},
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(5, 20, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(9, 20)}),
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 10),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 10)),
+ Blocks: []bloomshipper.BlockRef{plannertest.GenBlockRef(9, 20)},
+ },
+ },
+ },
+ },
+ },
+ {
+ // the range which needs to be generated doesn't overlap with existing blocks
+ // from other tsdb versions since theres an up to date tsdb version block,
+ // but we can trim the range needing generation
+ desc: "trims up to date area",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0)},
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(9, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(9, 20)}), // block for same tsdb
+ plannertest.GenMeta(9, 20, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(9, 20)}), // block for different tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 8),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 8)),
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "uses old block for overlapping range",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0)},
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(9, 20, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(9, 20)}), // block for same tsdb
+ plannertest.GenMeta(5, 20, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(5, 20)}), // block for different tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 8),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 8)),
+ Blocks: []bloomshipper.BlockRef{plannertest.GenBlockRef(5, 20)},
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "multi case",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0), plannertest.TsdbID(1)}, // generate for both tsdbs
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(0, 2, []int{0}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 1),
+ plannertest.GenBlockRef(1, 2),
+ }), // tsdb_0
+ plannertest.GenMeta(6, 8, []int{0}, []bloomshipper.BlockRef{plannertest.GenBlockRef(6, 8)}), // tsdb_0
+
+ plannertest.GenMeta(3, 5, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(3, 5)}), // tsdb_1
+ plannertest.GenMeta(8, 10, []int{1}, []bloomshipper.BlockRef{plannertest.GenBlockRef(8, 10)}), // tsdb_1
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1)
+ {
+ Bounds: v1.NewBounds(3, 5),
+ Series: plannertest.GenSeries(v1.NewBounds(3, 5)),
+ Blocks: []bloomshipper.BlockRef{plannertest.GenBlockRef(3, 5)},
+ },
+ {
+ Bounds: v1.NewBounds(9, 10),
+ Series: plannertest.GenSeries(v1.NewBounds(9, 10)),
+ Blocks: []bloomshipper.BlockRef{plannertest.GenBlockRef(8, 10)},
+ },
+ },
+ },
+ // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0)
+ {
+ tsdb: plannertest.TsdbID(1),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 2),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 2)),
+ Blocks: []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(0, 1),
+ plannertest.GenBlockRef(1, 2),
+ },
+ },
+ {
+ Bounds: v1.NewBounds(6, 7),
+ Series: plannertest.GenSeries(v1.NewBounds(6, 7)),
+ Blocks: []bloomshipper.BlockRef{plannertest.GenBlockRef(6, 8)},
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "dedupes block refs",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{plannertest.TsdbID(0)},
+ metas: []bloomshipper.Meta{
+ plannertest.GenMeta(9, 20, []int{1}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(1, 4),
+ plannertest.GenBlockRef(9, 20),
+ }), // blocks for first diff tsdb
+ plannertest.GenMeta(5, 20, []int{2}, []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(5, 10),
+ plannertest.GenBlockRef(9, 20), // same block references in prior meta (will be deduped)
+ }), // block for second diff tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: plannertest.TsdbID(0),
+ gaps: []protos.Gap{
+ {
+ Bounds: v1.NewBounds(0, 10),
+ Series: plannertest.GenSeries(v1.NewBounds(0, 10)),
+ Blocks: []bloomshipper.BlockRef{
+ plannertest.GenBlockRef(1, 4),
+ plannertest.GenBlockRef(5, 10),
+ plannertest.GenBlockRef(9, 20),
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ // We add series spanning the whole FP ownership range
+ tsdbs := make(map[tsdb.SingleTenantTSDBIdentifier]common.ClosableForSeries)
+ for _, id := range tc.tsdbs {
+ tsdbs[id] = newFakeForSeries(plannertest.GenSeries(tc.ownershipRange))
+ }
+
+ // we reuse the gapsBetweenTSDBsAndMetas function to generate the gaps as this function is tested
+ // separately and it's used to generate input in our regular code path (easier to write tests this way).
+ gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tsdbs, tc.metas)
+ require.NoError(t, err)
+
+ plans, err := blockPlansForGaps(
+ context.Background(),
+ "fakeTenant",
+ gaps,
+ tc.metas,
+ )
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.ElementsMatch(t, tc.exp, plans)
+ })
+ }
+}
+
+type fakeForSeries struct {
+ series []*v1.Series
+}
+
+func newFakeForSeries(series []*v1.Series) *fakeForSeries {
+ return &fakeForSeries{
+ series: series,
+ }
+}
+
+func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) (stop bool), _ ...*labels.Matcher) error {
+ overlapping := make([]*v1.Series, 0, len(f.series))
+ for _, s := range f.series {
+ if ff.Match(s.Fingerprint) {
+ overlapping = append(overlapping, s)
+ }
+ }
+
+ for _, s := range overlapping {
+ chunks := make([]index.ChunkMeta, 0, len(s.Chunks))
+ for _, c := range s.Chunks {
+ chunks = append(chunks, index.ChunkMeta{
+ MinTime: int64(c.From),
+ MaxTime: int64(c.Through),
+ Checksum: c.Checksum,
+ KB: 100,
+ })
+ }
+
+ if fn(labels.EmptyLabels(), s.Fingerprint, chunks) {
+ break
+ }
+ }
+ return nil
+}
+
+func (f fakeForSeries) Close() error {
+ return nil
+}
diff --git a/pkg/bloombuild/planner/util.go b/pkg/bloombuild/planner/strategies/util.go
similarity index 99%
rename from pkg/bloombuild/planner/util.go
rename to pkg/bloombuild/planner/strategies/util.go
index f9a97587f802f..31ce42be154a3 100644
--- a/pkg/bloombuild/planner/util.go
+++ b/pkg/bloombuild/planner/strategies/util.go
@@ -1,4 +1,4 @@
-package planner
+package strategies
import (
"fmt"
diff --git a/pkg/bloombuild/planner/util_test.go b/pkg/bloombuild/planner/util_test.go
deleted file mode 100644
index 6755478ef7290..0000000000000
--- a/pkg/bloombuild/planner/util_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package planner
-
-import (
- "math"
- "testing"
-
- "github.com/prometheus/common/model"
- "github.com/stretchr/testify/require"
-
- v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
-)
-
-func TestSplitFingerprintKeyspaceByFactor(t *testing.T) {
- for _, tt := range []struct {
- name string
- factor int
- }{
- {
- name: "Factor is 0",
- factor: 0,
- },
- {
- name: "Factor is 1",
- factor: 1,
- },
- {
- name: "Factor is 256",
- factor: 256,
- },
- } {
- t.Run(tt.name, func(t *testing.T) {
- got := SplitFingerprintKeyspaceByFactor(tt.factor)
-
- if tt.factor == 0 {
- require.Empty(t, got)
- return
- }
-
- // Check overall min and max values of the ranges.
- require.Equal(t, model.Fingerprint(math.MaxUint64), got[len(got)-1].Max)
- require.Equal(t, model.Fingerprint(0), got[0].Min)
-
- // For each range, check that the max value of the previous range is one less than the min value of the current range.
- for i := 1; i < len(got); i++ {
- require.Equal(t, got[i-1].Max+1, got[i].Min)
- }
- })
- }
-}
-
-func Test_FindGapsInFingerprintBounds(t *testing.T) {
- for _, tc := range []struct {
- desc string
- err bool
- exp []v1.FingerprintBounds
- ownershipRange v1.FingerprintBounds
- metas []v1.FingerprintBounds
- }{
- {
- desc: "error nonoverlapping metas",
- err: true,
- exp: nil,
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{v1.NewBounds(11, 20)},
- },
- {
- desc: "one meta with entire ownership range",
- err: false,
- exp: nil,
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{v1.NewBounds(0, 10)},
- },
- {
- desc: "two non-overlapping metas with entire ownership range",
- err: false,
- exp: nil,
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, 5),
- v1.NewBounds(6, 10),
- },
- },
- {
- desc: "two overlapping metas with entire ownership range",
- err: false,
- exp: nil,
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, 6),
- v1.NewBounds(4, 10),
- },
- },
- {
- desc: "one meta with partial ownership range",
- err: false,
- exp: []v1.FingerprintBounds{
- v1.NewBounds(6, 10),
- },
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, 5),
- },
- },
- {
- desc: "smaller subsequent meta with partial ownership range",
- err: false,
- exp: []v1.FingerprintBounds{
- v1.NewBounds(8, 10),
- },
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, 7),
- v1.NewBounds(3, 4),
- },
- },
- {
- desc: "hole in the middle",
- err: false,
- exp: []v1.FingerprintBounds{
- v1.NewBounds(4, 5),
- },
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, 3),
- v1.NewBounds(6, 10),
- },
- },
- {
- desc: "holes on either end",
- err: false,
- exp: []v1.FingerprintBounds{
- v1.NewBounds(0, 2),
- v1.NewBounds(8, 10),
- },
- ownershipRange: v1.NewBounds(0, 10),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(3, 5),
- v1.NewBounds(6, 7),
- },
- },
- {
- desc: "full ownership range with single meta",
- err: false,
- exp: nil,
- ownershipRange: v1.NewBounds(0, math.MaxUint64),
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, math.MaxUint64),
- },
- },
- {
- desc: "full ownership range with multiple metas",
- err: false,
- exp: nil,
- ownershipRange: v1.NewBounds(0, math.MaxUint64),
- // Three metas covering the whole 0 - MaxUint64
- metas: []v1.FingerprintBounds{
- v1.NewBounds(0, math.MaxUint64/3),
- v1.NewBounds(math.MaxUint64/3+1, math.MaxUint64/2),
- v1.NewBounds(math.MaxUint64/2+1, math.MaxUint64),
- },
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- gaps, err := FindGapsInFingerprintBounds(tc.ownershipRange, tc.metas)
- if tc.err {
- require.Error(t, err)
- return
- }
- require.Equal(t, tc.exp, gaps)
- })
- }
-}
diff --git a/pkg/bloombuild/protos/compat.go b/pkg/bloombuild/protos/compat.go
index 468278e77dbea..7c910d405ad9b 100644
--- a/pkg/bloombuild/protos/compat.go
+++ b/pkg/bloombuild/protos/compat.go
@@ -249,7 +249,7 @@ func (r *TaskResult) ToProtoTaskResult() *ProtoTaskResult {
}
protoMetas = append(protoMetas, &ProtoMeta{
- MetaRef: meta.MetaRef.String(),
+ MetaRef: meta.String(),
SourcesTSDBs: metaRefs,
BlockRefs: blockRefs,
})
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 3c42f68ef0ddf..c221af9e6d34c 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -200,9 +200,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
// Shortcut if request does not contain filters
if len(matchers) == 0 {
stats.Status = labelSuccess
- return &logproto.FilterChunkRefResponse{
- ChunkRefs: req.Refs,
- }, nil
+ return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
}
blocks := make([]bloomshipper.BlockRef, 0, len(req.Blocks))
@@ -218,9 +216,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
// Shortcut if request does not contain blocks
if len(blocks) == 0 {
stats.Status = labelSuccess
- return &logproto.FilterChunkRefResponse{
- ChunkRefs: req.Refs,
- }, nil
+ return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
}
seriesByDay := partitionRequest(req)
@@ -233,6 +229,14 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
"series_requested", len(req.Refs),
)
+ // len(seriesByDay) should never be 0
+ // Not sure how this can happen, but there was a bug report
+ // https://github.com/grafana/loki/issues/14623
+ if len(seriesByDay) == 0 {
+ stats.Status = labelSuccess
+ return &logproto.FilterChunkRefResponse{ChunkRefs: req.Refs}, nil
+ }
+
if len(seriesByDay) > 1 {
stats.Status = labelFailure
return nil, errors.New("request time range must span exactly one day")
@@ -352,7 +356,7 @@ func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses []v1.Output)
// dedupe outputs, merging the same series.
// This returns an Iterator[v1.Output]
- dedupedResps := iter.NewDedupingIter[v1.Output, v1.Output](
+ dedupedResps := iter.NewDedupingIter(
// eq
func(o1, o2 v1.Output) bool {
return o1.Fp == o2.Fp
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index a873d04960b47..9163e91e8756e 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -204,7 +204,7 @@ func (c *GatewayClient) Close() {
c.dnsProvider.Stop()
}
-// FilterChunkRefs implements Client
+// FilterChunks implements Client
func (c *GatewayClient) FilterChunks(ctx context.Context, _ string, interval bloomshipper.Interval, blocks []blockWithSeries, plan plan.QueryPlan) ([]*logproto.GroupedChunkRefs, error) {
// no block and therefore no series with chunks
if len(blocks) == 0 {
@@ -301,12 +301,12 @@ func mergeSeries(input [][]*logproto.GroupedChunkRefs, buf []*logproto.GroupedCh
iters = append(iters, iter.NewPeekIter(iter.NewSliceIter(inp)))
}
- heapIter := v1.NewHeapIterator[*logproto.GroupedChunkRefs](
+ heapIter := v1.NewHeapIterator(
func(a, b *logproto.GroupedChunkRefs) bool { return a.Fingerprint < b.Fingerprint },
iters...,
)
- dedupeIter := iter.NewDedupingIter[*logproto.GroupedChunkRefs, *logproto.GroupedChunkRefs](
+ dedupeIter := iter.NewDedupingIter(
// eq
func(a, b *logproto.GroupedChunkRefs) bool { return a.Fingerprint == b.Fingerprint },
// from
diff --git a/pkg/bloomgateway/client_pool.go b/pkg/bloomgateway/client_pool.go
index 4b45292bef889..ebf2da9f514c4 100644
--- a/pkg/bloomgateway/client_pool.go
+++ b/pkg/bloomgateway/client_pool.go
@@ -54,7 +54,7 @@ type AddressProvider interface {
}
func NewJumpHashClientPool(clientFactory ClientFactory, dnsProvider AddressProvider, updateInterval time.Duration, logger log.Logger) (*JumpHashClientPool, error) {
- selector := jumphash.DefaultSelector()
+ selector := jumphash.DefaultSelector("bloomgateway")
err := selector.SetServers(dnsProvider.Addresses()...)
if err != nil {
level.Warn(logger).Log("msg", "error updating servers", "err", err)
@@ -76,14 +76,6 @@ func (p *JumpHashClientPool) Stop() {
_ = services.StopAndAwaitTerminated(context.Background(), p.Service)
}
-func (p *JumpHashClientPool) AddrForFingerprint(fp uint64) (string, error) {
- addr, err := p.FromUInt64(fp)
- if err != nil {
- return "", err
- }
- return addr.String(), nil
-}
-
func (p *JumpHashClientPool) Addr(key string) (string, error) {
addr, err := p.FromString(key)
if err != nil {
diff --git a/pkg/bloomgateway/metrics.go b/pkg/bloomgateway/metrics.go
index 4eeffbf8ad682..690f95354a23b 100644
--- a/pkg/bloomgateway/metrics.go
+++ b/pkg/bloomgateway/metrics.go
@@ -23,7 +23,6 @@ const (
type clientMetrics struct {
clientRequests *prometheus.CounterVec
requestLatency *prometheus.HistogramVec
- clients prometheus.Gauge
}
func newClientMetrics(registerer prometheus.Registerer) *clientMetrics {
@@ -41,12 +40,6 @@ func newClientMetrics(registerer prometheus.Registerer) *clientMetrics {
Help: "Time (in seconds) spent serving requests when using the bloom gateway",
Buckets: instrument.DefBuckets,
}, []string{"operation", "status_code"}),
- clients: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
- Namespace: constants.Loki,
- Subsystem: "bloom_gateway",
- Name: "clients",
- Help: "The current number of bloom gateway clients.",
- }),
}
}
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index 2aee9dc32c48b..7f929a1e19c6a 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -18,17 +18,20 @@ const (
Day = 24 * time.Hour
)
-type tokenSettings struct {
- nGramLen int
-}
-
type wrappedError struct {
mu sync.Mutex
err error
}
func (e *wrappedError) Error() string {
- return e.err.Error()
+ e.mu.Lock()
+ err := e.err
+ e.mu.Unlock()
+
+ if err == nil {
+ return ""
+ }
+ return err.Error()
}
func (e *wrappedError) Set(err error) {
diff --git a/pkg/bloomgateway/multiplexing_test.go b/pkg/bloomgateway/multiplexing_test.go
index e6b97679e1ef8..d290817ef4be7 100644
--- a/pkg/bloomgateway/multiplexing_test.go
+++ b/pkg/bloomgateway/multiplexing_test.go
@@ -108,7 +108,7 @@ func TestTask_RequestIterator(t *testing.T) {
}
// merge the request iterators using the heap sort iterator
- it := v1.NewHeapIterator[v1.Request](func(r1, r2 v1.Request) bool { return r1.Fp < r2.Fp }, iters...)
+ it := v1.NewHeapIterator(func(r1, r2 v1.Request) bool { return r1.Fp < r2.Fp }, iters...)
// first item
require.True(t, it.Next())
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 2b44b1e5459c1..9851166fb2de8 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -168,7 +168,7 @@ func (p *processor) processBlock(_ context.Context, bq *bloomshipper.CloseableBl
iters = append(iters, it)
}
- logger := log.With(p.logger, "block", bq.BlockRef.String())
+ logger := log.With(p.logger, "block", bq.String())
fq := blockQuerier.Fuse(iters, logger)
start := time.Now()
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index dfc3746380ab3..211372b84bcc5 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -149,7 +149,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
// We can perform requests sequentially, because most of the time the request
// only covers a single day, and if not, it's at most two days.
for _, s := range partitionSeriesByDay(from, through, grouped) {
- day := bloomshipper.NewInterval(s.day.Time, s.day.Time.Add(Day))
+ day := bloomshipper.NewInterval(s.day.Time, s.day.Add(Day))
blocks, skipped, err := bq.blockResolver.Resolve(ctx, tenant, day, s.series)
if err != nil {
return nil, err
diff --git a/pkg/bloomgateway/stats.go b/pkg/bloomgateway/stats.go
index fe0046a2f11bd..76fad12ff9e31 100644
--- a/pkg/bloomgateway/stats.go
+++ b/pkg/bloomgateway/stats.go
@@ -13,7 +13,7 @@ type Stats struct {
ChunksRequested, ChunksFiltered int
SeriesRequested, SeriesFiltered int
QueueTime *atomic.Duration
- MetasFetchTime, BlocksFetchTime *atomic.Duration
+ BlocksFetchTime *atomic.Duration
ProcessingTime, TotalProcessingTime *atomic.Duration
PostProcessingTime *atomic.Duration
ProcessedBlocks *atomic.Int32 // blocks processed for this specific request
@@ -31,7 +31,6 @@ func ContextWithEmptyStats(ctx context.Context) (*Stats, context.Context) {
ProcessedBlocks: atomic.NewInt32(0),
ProcessedBlocksTotal: atomic.NewInt32(0),
QueueTime: atomic.NewDuration(0),
- MetasFetchTime: atomic.NewDuration(0),
BlocksFetchTime: atomic.NewDuration(0),
ProcessingTime: atomic.NewDuration(0),
TotalProcessingTime: atomic.NewDuration(0),
@@ -54,7 +53,6 @@ func FromContext(ctx context.Context) *Stats {
// aggregates the total duration
func (s *Stats) Duration() (dur time.Duration) {
dur += s.QueueTime.Load()
- dur += s.MetasFetchTime.Load()
dur += s.BlocksFetchTime.Load()
dur += s.ProcessingTime.Load()
dur += s.PostProcessingTime.Load()
@@ -82,7 +80,6 @@ func (s *Stats) KVArgs() []any {
"chunks_remaining", chunksRemaining,
"filter_ratio", filterRatio,
"queue_time", s.QueueTime.Load(),
- "metas_fetch_time", s.MetasFetchTime.Load(),
"blocks_fetch_time", s.BlocksFetchTime.Load(),
"processing_time", s.ProcessingTime.Load(),
"post_processing_time", s.PostProcessingTime.Load(),
@@ -97,13 +94,6 @@ func (s *Stats) AddQueueTime(t time.Duration) {
s.QueueTime.Add(t)
}
-func (s *Stats) AddMetasFetchTime(t time.Duration) {
- if s == nil {
- return
- }
- s.MetasFetchTime.Add(t)
-}
-
func (s *Stats) AddBlocksFetchTime(t time.Duration) {
if s == nil {
return
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index bb130019d4381..868fdf2ac6ed5 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -4,7 +4,6 @@ import (
"sort"
"github.com/prometheus/common/model"
- "golang.org/x/exp/slices"
"github.com/grafana/loki/v3/pkg/logproto"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@@ -34,23 +33,6 @@ func daysForRange(from, through model.Time) []model.Time {
return days
}
-// getFromThrough assumes a list of ShortRefs sorted by From time
-func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) {
- if len(refs) == 0 {
- return model.Earliest, model.Latest
- }
-
- if len(refs) == 1 {
- return refs[0].From, refs[0].Through
- }
-
- maxItem := slices.MaxFunc(refs, func(a, b *logproto.ShortRef) int {
- return int(a.Through) - int(b.Through)
- })
-
- return refs[0].From, maxItem.Through
-}
-
// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs
func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs {
result := make(v1.ChunkRefs, 0, len(refs))
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index f2be3b0665dd4..2838c9e5e8c5c 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -31,23 +31,6 @@ func mktime(s string) model.Time {
return model.TimeFromUnix(ts.Unix())
}
-func TestGetFromThrough(t *testing.T) {
- chunks := []*logproto.ShortRef{
- {From: 0, Through: 6},
- {From: 1, Through: 5},
- {From: 2, Through: 9},
- {From: 3, Through: 8},
- {From: 4, Through: 7},
- }
- from, through := getFromThrough(chunks)
- require.Equal(t, model.Time(0), from)
- require.Equal(t, model.Time(9), through)
-
- // assert that slice order did not change
- require.Equal(t, model.Time(0), chunks[0].From)
- require.Equal(t, model.Time(4), chunks[len(chunks)-1].From)
-}
-
func TestTruncateDay(t *testing.T) {
expected := mktime("2024-01-24 00:00")
diff --git a/pkg/configs/client/client.go b/pkg/configs/client/client.go
deleted file mode 100644
index 44af1bda4f504..0000000000000
--- a/pkg/configs/client/client.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package client
-
-import (
- "context"
- "crypto/tls"
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "net/http"
- "net/url"
- "time"
-
- "github.com/go-kit/log/level"
- dstls "github.com/grafana/dskit/crypto/tls"
- "github.com/grafana/dskit/flagext"
- "github.com/grafana/dskit/instrument"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/prometheus/common/version"
-
- "github.com/grafana/loki/v3/pkg/configs/userconfig"
- util_log "github.com/grafana/loki/v3/pkg/util/log"
-)
-
-var (
- errBadURL = errors.New("configs_api_url is not set or valid")
-)
-
-// Config says where we can find the ruler userconfig.
-type Config struct {
- ConfigsAPIURL flagext.URLValue `yaml:"configs_api_url"`
- ClientTimeout time.Duration `yaml:"client_timeout"` // HTTP timeout duration for requests made to the Weave Cloud configs service.
- TLS dstls.ClientConfig `yaml:",inline"`
-}
-
-// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
-func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- f.Var(&cfg.ConfigsAPIURL, prefix+"configs.url", "URL of configs API server.")
- f.DurationVar(&cfg.ClientTimeout, prefix+"configs.client-timeout", 5*time.Second, "Timeout for requests to Weave Cloud configs service.")
- cfg.TLS.RegisterFlagsWithPrefix(prefix+"configs", f)
-}
-
-var configsRequestDuration = instrument.NewHistogramCollector(promauto.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: "cortex",
- Name: "configs_request_duration_seconds",
- Help: "Time spent requesting userconfig.",
- Buckets: prometheus.DefBuckets,
-}, []string{"operation", "status_code"}))
-
-// Client is what the ruler and altermanger needs from a config store to process rules.
-type Client interface {
- // GetRules returns all Cortex configurations from a configs API server
- // that have been updated after the given userconfig.ID was last updated.
- GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error)
-
- // GetAlerts fetches all the alerts that have changes since since.
- GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error)
-}
-
-// New creates a new ConfigClient.
-func New(cfg Config) (*ConfigDBClient, error) {
-
- if cfg.ConfigsAPIURL.URL == nil {
- return nil, errBadURL
- }
-
- client := &ConfigDBClient{
- URL: cfg.ConfigsAPIURL.URL,
- Timeout: cfg.ClientTimeout,
- }
-
- tlsConfig, err := cfg.TLS.GetTLSConfig()
- if err != nil {
- return nil, err
- }
-
- if tlsConfig != nil {
- client.TLSConfig = tlsConfig
- }
- return client, nil
-}
-
-// ConfigDBClient allows retrieving recording and alerting rules from the configs server.
-type ConfigDBClient struct {
- URL *url.URL
- Timeout time.Duration
- TLSConfig *tls.Config
-}
-
-// GetRules implements Client
-func (c ConfigDBClient) GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
- suffix := ""
- if since != 0 {
- suffix = fmt.Sprintf("?since=%d", since)
- }
- endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix)
- var response *ConfigsResponse
- err := instrument.CollectedRequest(ctx, "GetRules", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
- var err error
- response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
- return err
- })
- if err != nil {
- return nil, err
- }
- configs := map[string]userconfig.VersionedRulesConfig{}
- for id, view := range response.Configs {
- cfg := view.GetVersionedRulesConfig()
- if cfg != nil {
- configs[id] = *cfg
- }
- }
- return configs, nil
-}
-
-// GetAlerts implements Client.
-func (c ConfigDBClient) GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error) {
- suffix := ""
- if since != 0 {
- suffix = fmt.Sprintf("?since=%d", since)
- }
- endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix)
- var response *ConfigsResponse
- err := instrument.CollectedRequest(ctx, "GetAlerts", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
- var err error
- response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
- return err
- })
- return response, err
-}
-
-func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, since userconfig.ID) (*ConfigsResponse, error) {
- req, err := http.NewRequest("GET", endpoint, nil)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{Timeout: timeout}
- if tlsConfig != nil {
- client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
- }
-
- req.Header.Set("User-Agent", fmt.Sprintf("Cortex/%s", version.Version))
-
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("Invalid response from configs server: %v", resp.StatusCode)
- }
-
- var config ConfigsResponse
- if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
- level.Error(util_log.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err)
- return nil, err
- }
-
- config.since = since
- return &config, nil
-}
-
-// ConfigsResponse is a response from server for Getuserconfig.
-type ConfigsResponse struct {
- // The version since which these configs were changed
- since userconfig.ID
-
- // Configs maps user ID to their latest userconfig.View.
- Configs map[string]userconfig.View `json:"configs"`
-}
-
-// GetLatestConfigID returns the last config ID from a set of userconfig.
-func (c ConfigsResponse) GetLatestConfigID() userconfig.ID {
- latest := c.since
- for _, config := range c.Configs {
- if config.ID > latest {
- latest = config.ID
- }
- }
- return latest
-}
diff --git a/pkg/configs/client/configs_test.go b/pkg/configs/client/configs_test.go
deleted file mode 100644
index 64f4b98d202e0..0000000000000
--- a/pkg/configs/client/configs_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/grafana/loki/v3/pkg/configs/userconfig"
-)
-
-var response = `{
- "configs": {
- "2": {
- "id": 1,
- "config": {
- "rules_files": {
- "recording.rules": "groups:\n- name: demo-service-alerts\n interval: 15s\n rules:\n - alert: SomethingIsUp\n expr: up == 1\n"
- },
- "rule_format_version": "2"
- }
- }
- }
-}
-`
-
-func TestDoRequest(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
- _, err := w.Write([]byte(response))
- require.NoError(t, err)
- }))
- defer server.Close()
-
- resp, err := doRequest(server.URL, 1*time.Second, nil, 0)
- assert.Nil(t, err)
-
- expected := ConfigsResponse{Configs: map[string]userconfig.View{
- "2": {
- ID: 1,
- Config: userconfig.Config{
- RulesConfig: userconfig.RulesConfig{
- Files: map[string]string{
- "recording.rules": "groups:\n- name: demo-service-alerts\n interval: 15s\n rules:\n - alert: SomethingIsUp\n expr: up == 1\n",
- },
- FormatVersion: userconfig.RuleFormatV2,
- },
- },
- },
- }}
- assert.Equal(t, &expected, resp)
-}
diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go
deleted file mode 100644
index e7d22e033a8ec..0000000000000
--- a/pkg/configs/userconfig/config.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package userconfig
-
-import (
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/go-kit/log"
- "github.com/pkg/errors"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/rulefmt"
- "github.com/prometheus/prometheus/promql/parser"
- "github.com/prometheus/prometheus/rules"
- "gopkg.in/yaml.v3"
-
- util_log "github.com/grafana/loki/v3/pkg/util/log"
-)
-
-// An ID is the ID of a single users's Cortex configuration. When a
-// configuration changes, it gets a new ID.
-type ID int
-
-// RuleFormatVersion indicates which Prometheus rule format (v1 vs. v2) to use in parsing.
-type RuleFormatVersion int
-
-const (
- // RuleFormatV1 is the Prometheus 1.x rule format.
- RuleFormatV1 RuleFormatVersion = iota
- // RuleFormatV2 is the Prometheus 2.x rule format.
- RuleFormatV2 RuleFormatVersion = iota
-)
-
-// IsValid returns whether the rules format version is a valid (known) version.
-func (v RuleFormatVersion) IsValid() bool {
- switch v {
- case RuleFormatV1, RuleFormatV2:
- return true
- default:
- return false
- }
-}
-
-// MarshalJSON implements json.Marshaler.
-func (v RuleFormatVersion) MarshalJSON() ([]byte, error) {
- switch v {
- case RuleFormatV1:
- return json.Marshal("1")
- case RuleFormatV2:
- return json.Marshal("2")
- default:
- return nil, fmt.Errorf("unknown rule format version %d", v)
- }
-}
-
-// MarshalYAML implements yaml.Marshaler.
-func (v RuleFormatVersion) MarshalYAML() (interface{}, error) {
- switch v {
- case RuleFormatV1:
- return yaml.Marshal("1")
- case RuleFormatV2:
- return yaml.Marshal("2")
- default:
- return nil, fmt.Errorf("unknown rule format version %d", v)
- }
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *RuleFormatVersion) UnmarshalJSON(data []byte) error {
- var s string
- if err := json.Unmarshal(data, &s); err != nil {
- return err
- }
- switch s {
- case "1":
- *v = RuleFormatV1
- case "2":
- *v = RuleFormatV2
- default:
- return fmt.Errorf("unknown rule format version %q", string(data))
- }
- return nil
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- switch s {
- case "1":
- *v = RuleFormatV1
- case "2":
- *v = RuleFormatV2
- default:
- return fmt.Errorf("unknown rule format version %q", s)
- }
- return nil
-}
-
-// A Config is a Cortex configuration for a single user.
-type Config struct {
- // RulesFiles maps from a rules filename to file contents.
- RulesConfig RulesConfig
- TemplateFiles map[string]string
- AlertmanagerConfig string
-}
-
-// configCompat is a compatibility struct to support old JSON config blobs
-// saved in the config DB that didn't have a rule format version yet and
-// just had a top-level field for the rule files.
-type configCompat struct {
- RulesFiles map[string]string `json:"rules_files" yaml:"rules_files"`
- RuleFormatVersion RuleFormatVersion `json:"rule_format_version" yaml:"rule_format_version"`
- TemplateFiles map[string]string `json:"template_files" yaml:"template_files"`
- AlertmanagerConfig string `json:"alertmanager_config" yaml:"alertmanager_config"`
-}
-
-// MarshalJSON implements json.Marshaler.
-func (c Config) MarshalJSON() ([]byte, error) {
- compat := &configCompat{
- RulesFiles: c.RulesConfig.Files,
- RuleFormatVersion: c.RulesConfig.FormatVersion,
- TemplateFiles: c.TemplateFiles,
- AlertmanagerConfig: c.AlertmanagerConfig,
- }
-
- return json.Marshal(compat)
-}
-
-// MarshalYAML implements yaml.Marshaler.
-func (c Config) MarshalYAML() (interface{}, error) {
- compat := &configCompat{
- RulesFiles: c.RulesConfig.Files,
- RuleFormatVersion: c.RulesConfig.FormatVersion,
- TemplateFiles: c.TemplateFiles,
- AlertmanagerConfig: c.AlertmanagerConfig,
- }
-
- return yaml.Marshal(compat)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (c *Config) UnmarshalJSON(data []byte) error {
- compat := configCompat{}
- if err := json.Unmarshal(data, &compat); err != nil {
- return err
- }
- *c = Config{
- RulesConfig: RulesConfig{
- Files: compat.RulesFiles,
- FormatVersion: compat.RuleFormatVersion,
- },
- TemplateFiles: compat.TemplateFiles,
- AlertmanagerConfig: compat.AlertmanagerConfig,
- }
- return nil
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
- compat := configCompat{}
- if err := unmarshal(&compat); err != nil {
- return errors.WithStack(err)
- }
- *c = Config{
- RulesConfig: RulesConfig{
- Files: compat.RulesFiles,
- FormatVersion: compat.RuleFormatVersion,
- },
- TemplateFiles: compat.TemplateFiles,
- AlertmanagerConfig: compat.AlertmanagerConfig,
- }
- return nil
-}
-
-// View is what's returned from the Weave Cloud configs service
-// when we ask for all Cortex configurations.
-//
-// The configs service is essentially a JSON blob store that gives each
-// _version_ of a configuration a unique ID and guarantees that later versions
-// have greater IDs.
-type View struct {
- ID ID `json:"id"`
- Config Config `json:"config"`
- DeletedAt time.Time `json:"deleted_at"`
-}
-
-// IsDeleted tells you if the config is deleted.
-func (v View) IsDeleted() bool {
- return !v.DeletedAt.IsZero()
-}
-
-// GetVersionedRulesConfig specializes the view to just the rules config.
-func (v View) GetVersionedRulesConfig() *VersionedRulesConfig {
- if v.Config.RulesConfig.Files == nil {
- return nil
- }
- return &VersionedRulesConfig{
- ID: v.ID,
- Config: v.Config.RulesConfig,
- DeletedAt: v.DeletedAt,
- }
-}
-
-// RulesConfig is the rules configuration for a particular organization.
-type RulesConfig struct {
- FormatVersion RuleFormatVersion `json:"format_version"`
- Files map[string]string `json:"files"`
-}
-
-// Equal compares two RulesConfigs for equality.
-//
-// instance Eq RulesConfig
-func (c RulesConfig) Equal(o RulesConfig) bool {
- if c.FormatVersion != o.FormatVersion {
- return false
- }
- if len(o.Files) != len(c.Files) {
- return false
- }
- for k, v1 := range c.Files {
- v2, ok := o.Files[k]
- if !ok || v1 != v2 {
- return false
- }
- }
- return true
-}
-
-// Parse parses and validates the content of the rule files in a RulesConfig
-// according to the passed rule format version.
-func (c RulesConfig) Parse() (map[string][]rules.Rule, error) {
- switch c.FormatVersion {
- case RuleFormatV1:
- return nil, fmt.Errorf("version %v isn't supported", c.FormatVersion)
- case RuleFormatV2:
- return c.parseV2()
- default:
- return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
- }
-}
-
-// ParseFormatted returns the rulefmt map of a users rules configs. It allows
-// for rules to be mapped to disk and read by the prometheus rules manager.
-func (c RulesConfig) ParseFormatted() (map[string]rulefmt.RuleGroups, error) {
- switch c.FormatVersion {
- case RuleFormatV1:
- return nil, fmt.Errorf("version %v isn't supported", c.FormatVersion)
- case RuleFormatV2:
- return c.parseV2Formatted()
- default:
- return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
- }
-}
-
-// parseV2 parses and validates the content of the rule files in a RulesConfig
-// according to the Prometheus 2.x rule format.
-func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) {
- ruleMap := map[string]rulefmt.RuleGroups{}
-
- for fn, content := range c.Files {
- rgs, errs := rulefmt.Parse([]byte(content))
- for _, err := range errs { // return just the first error, if any
- return nil, err
- }
- ruleMap[fn] = *rgs
-
- }
- return ruleMap, nil
-}
-
-// parseV2 parses and validates the content of the rule files in a RulesConfig
-// according to the Prometheus 2.x rule format.
-//
-// NOTE: On one hand, we cannot return fully-fledged lists of rules.Group
-// here yet, as creating a rules.Group requires already
-// passing in rules.ManagerOptions options (which in turn require a
-// notifier, appender, etc.), which we do not want to create simply
-// for parsing. On the other hand, we should not return barebones
-// rulefmt.RuleGroup sets here either, as only a fully-converted rules.Rule
-// is able to track alert states over multiple rule evaluations. The caller
-// would otherwise have to ensure to convert the rulefmt.RuleGroup only exactly
-// once, not for every evaluation (or risk losing alert pending states). So
-// it's probably better to just return a set of rules.Rule here.
-func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) {
- groups := map[string][]rules.Rule{}
-
- for fn, content := range c.Files {
- rgs, errs := rulefmt.Parse([]byte(content))
- if len(errs) > 0 {
- return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0])
- }
-
- for _, rg := range rgs.Groups {
- rls := make([]rules.Rule, 0, len(rg.Rules))
- for _, rl := range rg.Rules {
- expr, err := parser.ParseExpr(rl.Expr.Value)
- if err != nil {
- return nil, err
- }
-
- if rl.Alert.Value != "" {
- rls = append(rls, rules.NewAlertingRule(
- rl.Alert.Value,
- expr,
- time.Duration(rl.For),
- time.Duration(rl.KeepFiringFor),
- labels.FromMap(rl.Labels),
- labels.FromMap(rl.Annotations),
- nil,
- "",
- true,
- log.With(util_log.Logger, "alert", rl.Alert.Value),
- ))
- continue
- }
- rls = append(rls, rules.NewRecordingRule(
- rl.Record.Value,
- expr,
- labels.FromMap(rl.Labels),
- ))
- }
-
- // Group names have to be unique in Prometheus, but only within one rules file.
- groups[rg.Name+";"+fn] = rls
- }
- }
-
- return groups, nil
-}
-
-// VersionedRulesConfig is a RulesConfig together with a version.
-// `data Versioned a = Versioned { id :: ID , config :: a }`
-type VersionedRulesConfig struct {
- ID ID `json:"id"`
- Config RulesConfig `json:"config"`
- DeletedAt time.Time `json:"deleted_at"`
-}
-
-// IsDeleted tells you if the config is deleted.
-func (vr VersionedRulesConfig) IsDeleted() bool {
- return !vr.DeletedAt.IsZero()
-}
diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go
deleted file mode 100644
index ac81d47e4ee98..0000000000000
--- a/pkg/configs/userconfig/config_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package userconfig
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/go-kit/log"
- "github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/rulefmt"
- "github.com/prometheus/prometheus/promql/parser"
- "github.com/prometheus/prometheus/rules"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
-
- util_log "github.com/grafana/loki/v3/pkg/util/log"
-)
-
-var legacyRulesFile = `ALERT TestAlert
-IF up == 0
-FOR 5m
-LABELS { severity = "critical" }
-ANNOTATIONS {
- message = "I am a message"
-}`
-
-var ruleFile = `groups:
-- name: example
- rules:
- - alert: TestAlert
- expr: up == 0
- for: 5m
- labels:
- severity: critical
- annotations:
- message: I am a message`
-
-func TestUnmarshalJSONLegacyConfigWithMissingRuleFormatVersionSucceeds(t *testing.T) {
- actual := Config{}
- buf := []byte(`{"rules_files": {"a": "b"}}`)
- assert.Nil(t, json.Unmarshal(buf, &actual))
-
- expected := Config{
- RulesConfig: RulesConfig{
- Files: map[string]string{
- "a": "b",
- },
- FormatVersion: RuleFormatV1,
- },
- }
-
- assert.Equal(t, expected, actual)
-}
-
-func TestUnmarshalYAMLLegacyConfigWithMissingRuleFormatVersionSucceeds(t *testing.T) {
- actual := Config{}
- buf := []byte(strings.TrimSpace(`
-rule_format_version: '1'
-rules_files:
- a: b
-`))
- assert.Nil(t, yaml.Unmarshal(buf, &actual))
-
- expected := Config{
- RulesConfig: RulesConfig{
- Files: map[string]string{
- "a": "b",
- },
- FormatVersion: RuleFormatV1,
- },
- }
-
- assert.Equal(t, expected, actual)
-}
-
-func TestParseLegacyAlerts(t *testing.T) {
- parsed, err := parser.ParseExpr("up == 0")
- require.NoError(t, err)
- rule := rules.NewAlertingRule(
- "TestAlert",
- parsed,
- 5*time.Minute,
- 0,
- labels.Labels{
- labels.Label{Name: "severity", Value: "critical"},
- },
- labels.Labels{
- labels.Label{Name: "message", Value: "I am a message"},
- },
- nil,
- "",
- true,
- log.With(util_log.Logger, "alert", "TestAlert"),
- )
-
- for i, tc := range []struct {
- cfg RulesConfig
- expected map[string][]rules.Rule
- wantErr error
- }{
- {
- cfg: RulesConfig{
- FormatVersion: RuleFormatV1,
- Files: map[string]string{
- "legacy.rules": `
- ALERT TestAlert
- IF up == 0
- FOR 5m
- LABELS { severity = "critical" }
- ANNOTATIONS {
- message = "I am a message"
- }
- `,
- },
- },
- expected: map[string][]rules.Rule{
- "legacy.rules": {rule},
- },
- wantErr: fmt.Errorf("version 0 isn't supported"),
- },
- {
- cfg: RulesConfig{
- FormatVersion: RuleFormatV2,
- Files: map[string]string{
- "alerts.yaml": `
-groups:
-- name: example
- rules:
- - alert: TestAlert
- expr: up == 0
- for: 5m
- labels:
- severity: critical
- annotations:
- message: I am a message
-`,
- },
- },
- expected: map[string][]rules.Rule{
- "example;alerts.yaml": {rule},
- },
- wantErr: nil,
- },
- } {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- rules, err := tc.cfg.Parse()
- if tc.wantErr != nil {
- require.EqualError(t, err, tc.wantErr.Error())
- } else {
- require.NoError(t, err)
- require.Equal(t, tc.expected, rules)
- }
- })
- }
-}
-
-func TestParseFormatted(t *testing.T) {
- dur, err := model.ParseDuration("5m")
- require.NoError(t, err)
-
- rulesV1 := []rulefmt.RuleNode{
- {
- Alert: yaml.Node{Value: "TestAlert"},
- Expr: yaml.Node{Value: "up == 0"},
- For: dur,
- Labels: map[string]string{
- "severity": "critical",
- },
- Annotations: map[string]string{
- "message": "I am a message",
- },
- },
- }
-
- alertNode := yaml.Node{Line: 4, Column: 12}
- alertNode.SetString("TestAlert")
- exprNode := yaml.Node{Line: 5, Column: 11}
- exprNode.SetString("up == 0")
- rulesV2 := []rulefmt.RuleNode{
- {
- Alert: alertNode,
- Expr: exprNode,
- For: dur,
- Labels: map[string]string{
- "severity": "critical",
- },
- Annotations: map[string]string{
- "message": "I am a message",
- },
- },
- }
-
- for i, tc := range []struct {
- cfg RulesConfig
- expected map[string]rulefmt.RuleGroups
- wantErr error
- }{
- {
- cfg: RulesConfig{
- FormatVersion: RuleFormatV1,
- Files: map[string]string{
- "legacy.rules": legacyRulesFile,
- },
- },
- expected: map[string]rulefmt.RuleGroups{
- "legacy.rules": {
- Groups: []rulefmt.RuleGroup{
- {
- Name: "rg:legacy.rules",
- Rules: rulesV1,
- },
- },
- },
- },
- wantErr: fmt.Errorf("version 0 isn't supported"),
- },
- {
- cfg: RulesConfig{
- FormatVersion: RuleFormatV2,
- Files: map[string]string{
- "alerts.yaml": ruleFile,
- },
- },
- expected: map[string]rulefmt.RuleGroups{
- "alerts.yaml": {
- Groups: []rulefmt.RuleGroup{
- {
- Name: "example",
- Rules: rulesV2,
- },
- },
- },
- },
- wantErr: nil,
- },
- } {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- rules, err := tc.cfg.ParseFormatted()
- if tc.wantErr != nil {
- require.EqualError(t, err, tc.wantErr.Error())
- } else {
- require.NoError(t, err)
- require.Equal(t, tc.expected, rules)
- }
- })
- }
-}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 30383bfcbbbd4..6fb7bc3fc7f6b 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -47,6 +47,7 @@ import (
"github.com/grafana/loki/v3/pkg/ingester"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/kafka"
+ kafka_client "github.com/grafana/loki/v3/pkg/kafka/client"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log/logfmt"
@@ -234,11 +235,11 @@ func New(
var kafkaWriter KafkaProducer
if cfg.KafkaEnabled {
- kafkaClient, err := kafka.NewWriterClient(cfg.KafkaConfig, 20, logger, registerer)
+ kafkaClient, err := kafka_client.NewWriterClient(cfg.KafkaConfig, 20, logger, registerer)
if err != nil {
return nil, fmt.Errorf("failed to start kafka client: %w", err)
}
- kafkaWriter = kafka.NewProducer(kafkaClient, cfg.KafkaConfig.ProducerMaxBufferedBytes,
+ kafkaWriter = kafka_client.NewProducer(kafkaClient, cfg.KafkaConfig.ProducerMaxBufferedBytes,
prometheus.WrapRegistererWithPrefix("_kafka_", registerer))
}
@@ -470,11 +471,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
validation.DiscardedSamples.WithLabelValues(validation.InvalidLabels, tenantID).Add(float64(len(stream.Entries)))
- bytes := 0
- for _, e := range stream.Entries {
- bytes += len(e.Line)
- }
- validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID).Add(float64(bytes))
+ discardedBytes := util.EntriesTotalSize(stream.Entries)
+ validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID).Add(float64(discardedBytes))
continue
}
@@ -501,7 +499,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
} else {
logLevel = detectLogLevelFromLogEntry(entry, structuredMetadata)
}
- if logLevel != constants.LogLevelUnknown && logLevel != "" {
+ if logLevel != "" {
entry.StructuredMetadata = append(entry.StructuredMetadata, logproto.LabelAdapter{
Name: constants.LevelLabel,
Value: logLevel,
@@ -526,7 +524,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
n++
- validatedLineSize += len(entry.Line)
+ validatedLineSize += util.EntryTotalSize(&entry)
validatedLineCount++
pushSize += len(entry.Line)
}
@@ -705,10 +703,7 @@ func (d *Distributor) trackDiscardedData(
continue
}
- discardedStreamBytes := 0
- for _, e := range stream.Entries {
- discardedStreamBytes += len(e.Line)
- }
+ discardedStreamBytes := util.EntriesTotalSize(stream.Entries)
if d.usageTracker != nil {
d.usageTracker.DiscardedBytesAdd(ctx, tenantID, reason, lbs, float64(discardedStreamBytes))
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 785d6ce03d0c3..f7247290251aa 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -12,6 +12,7 @@ import (
"testing"
"time"
+ "github.com/c2h5oh/datasize"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/httpgrpc"
@@ -54,7 +55,9 @@ var (
)
func TestDistributor(t *testing.T) {
- ingestionRateLimit := 0.000096 // 100 Bytes/s limit
+ lineSize := 10
+ ingestionRateLimit := datasize.ByteSize(400)
+ ingestionRateLimitMB := ingestionRateLimit.MBytes() // 400 Bytes/s limit
for i, tc := range []struct {
lines int
@@ -72,7 +75,7 @@ func TestDistributor(t *testing.T) {
{
lines: 100,
streams: 1,
- expectedErrors: []error{httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 100, 100, 1000)},
+ expectedErrors: []error{httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", ingestionRateLimit, 100, 100*lineSize)},
},
{
lines: 100,
@@ -104,15 +107,15 @@ func TestDistributor(t *testing.T) {
t.Run(fmt.Sprintf("[%d](lines=%v)", i, tc.lines), func(t *testing.T) {
limits := &validation.Limits{}
flagext.DefaultValues(limits)
- limits.IngestionRateMB = ingestionRateLimit
- limits.IngestionBurstSizeMB = ingestionRateLimit
+ limits.IngestionRateMB = ingestionRateLimitMB
+ limits.IngestionBurstSizeMB = ingestionRateLimitMB
limits.MaxLineSize = fe.ByteSize(tc.maxLineSize)
distributors, _ := prepare(t, 1, 5, limits, nil)
var request logproto.PushRequest
for i := 0; i < tc.streams; i++ {
- req := makeWriteRequest(tc.lines, 10)
+ req := makeWriteRequest(tc.lines, lineSize)
request.Streams = append(request.Streams, req.Streams[0])
}
@@ -1178,37 +1181,37 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) {
"local strategy: limit should be set to each distributor": {
distributors: 2,
ingestionRateStrategy: validation.LocalIngestionRateStrategy,
- ingestionRateMB: 10 * (1.0 / float64(bytesInMB)),
- ingestionBurstSizeMB: 10 * (1.0 / float64(bytesInMB)),
+ ingestionRateMB: datasize.ByteSize(100).MBytes(),
+ ingestionBurstSizeMB: datasize.ByteSize(100).MBytes(),
pushes: []testPush{
- {bytes: 5, expectedError: nil},
- {bytes: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 10, 1, 6)},
- {bytes: 5, expectedError: nil},
- {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 10, 1, 1)},
+ {bytes: 50, expectedError: nil},
+ {bytes: 60, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 100, 1, 60)},
+ {bytes: 50, expectedError: nil},
+ {bytes: 40, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 100, 1, 40)},
},
},
"global strategy: limit should be evenly shared across distributors": {
distributors: 2,
ingestionRateStrategy: validation.GlobalIngestionRateStrategy,
- ingestionRateMB: 10 * (1.0 / float64(bytesInMB)),
- ingestionBurstSizeMB: 5 * (1.0 / float64(bytesInMB)),
+ ingestionRateMB: datasize.ByteSize(200).MBytes(),
+ ingestionBurstSizeMB: datasize.ByteSize(100).MBytes(),
pushes: []testPush{
- {bytes: 3, expectedError: nil},
- {bytes: 3, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 5, 1, 3)},
- {bytes: 2, expectedError: nil},
- {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 5, 1, 1)},
+ {bytes: 60, expectedError: nil},
+ {bytes: 50, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 100, 1, 50)},
+ {bytes: 40, expectedError: nil},
+ {bytes: 30, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 100, 1, 30)},
},
},
"global strategy: burst should set to each distributor": {
distributors: 2,
ingestionRateStrategy: validation.GlobalIngestionRateStrategy,
- ingestionRateMB: 10 * (1.0 / float64(bytesInMB)),
- ingestionBurstSizeMB: 20 * (1.0 / float64(bytesInMB)),
+ ingestionRateMB: datasize.ByteSize(100).MBytes(),
+ ingestionBurstSizeMB: datasize.ByteSize(200).MBytes(),
pushes: []testPush{
- {bytes: 15, expectedError: nil},
- {bytes: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 5, 1, 6)},
- {bytes: 5, expectedError: nil},
- {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 5, 1, 1)},
+ {bytes: 150, expectedError: nil},
+ {bytes: 60, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 50, 1, 60)},
+ {bytes: 50, expectedError: nil},
+ {bytes: 30, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, validation.RateLimitedErrorMsg, "test", 50, 1, 30)},
},
},
}
@@ -1227,8 +1230,8 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) {
response, err := distributors[0].Push(ctx, request)
if push.expectedError == nil {
+ assert.NoError(t, err)
assert.Equal(t, success, response)
- assert.Nil(t, err)
} else {
assert.Nil(t, response)
assert.Equal(t, push.expectedError, err)
@@ -1413,7 +1416,7 @@ func makeWriteRequestWithLabelsWithLevel(lines, size int, labels []string, level
for j := 0; j < lines; j++ {
// Construct the log line, honoring the input size
- line := "msg=an error occured " + strconv.Itoa(j) + strings.Repeat("0", size) + " severity=" + level
+ line := "msg=an error occurred " + strconv.Itoa(j) + strings.Repeat("0", size) + " severity=" + level
stream.Entries = append(stream.Entries, logproto.Entry{
Timestamp: time.Now().Add(time.Duration(j) * time.Millisecond),
@@ -1640,7 +1643,7 @@ func Test_DetectLogLevels(t *testing.T) {
require.NoError(t, err)
topVal := ingester.Peek()
require.Equal(t, `{foo="bar"}`, topVal.Streams[0].Labels)
- require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 0)
+ require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 1)
})
t.Run("log level detection enabled and warn logs", func(t *testing.T) {
diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go
index 9a25087ffe0cd..61ecb175accc8 100644
--- a/pkg/distributor/validator.go
+++ b/pkg/distributor/validator.go
@@ -11,6 +11,7 @@ import (
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -82,15 +83,18 @@ func (v Validator) getValidationContextForTime(now time.Time, userID string) val
func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, labels labels.Labels, entry logproto.Entry) error {
ts := entry.Timestamp.UnixNano()
validation.LineLengthHist.Observe(float64(len(entry.Line)))
+ structuredMetadataCount := len(entry.StructuredMetadata)
+ structuredMetadataSizeBytes := util.StructuredMetadataSize(entry.StructuredMetadata)
+ entrySize := float64(len(entry.Line) + structuredMetadataSizeBytes)
if vCtx.rejectOldSample && ts < vCtx.rejectOldSampleMaxAge {
// Makes time string on the error message formatted consistently.
formatedEntryTime := entry.Timestamp.Format(timeFormat)
formatedRejectMaxAgeTime := time.Unix(0, vCtx.rejectOldSampleMaxAge).Format(timeFormat)
validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.GreaterThanMaxSampleAge, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.GreaterThanMaxSampleAge, labels, entrySize)
}
return fmt.Errorf(validation.GreaterThanMaxSampleAgeErrorMsg, labels, formatedEntryTime, formatedRejectMaxAgeTime)
}
@@ -98,9 +102,9 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
if ts > vCtx.creationGracePeriod {
formatedEntryTime := entry.Timestamp.Format(timeFormat)
validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.TooFarInFuture, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.TooFarInFuture, labels, entrySize)
}
return fmt.Errorf(validation.TooFarInFutureErrorMsg, labels, formatedEntryTime)
}
@@ -111,43 +115,37 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
// but the upstream cortex_validation pkg uses it, so we keep this
// for parity.
validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.LineTooLong, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.LineTooLong, labels, entrySize)
}
return fmt.Errorf(validation.LineTooLongErrorMsg, maxSize, labels, len(entry.Line))
}
- if len(entry.StructuredMetadata) > 0 {
+ if structuredMetadataCount > 0 {
if !vCtx.allowStructuredMetadata {
validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.DisallowedStructuredMetadata, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.DisallowedStructuredMetadata, labels, entrySize)
}
return fmt.Errorf(validation.DisallowedStructuredMetadataErrorMsg, labels)
}
- var structuredMetadataSizeBytes, structuredMetadataCount int
- for _, metadata := range entry.StructuredMetadata {
- structuredMetadataSizeBytes += len(metadata.Name) + len(metadata.Value)
- structuredMetadataCount++
- }
-
if maxSize := vCtx.maxStructuredMetadataSize; maxSize != 0 && structuredMetadataSizeBytes > maxSize {
validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooLarge, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooLarge, labels, entrySize)
}
return fmt.Errorf(validation.StructuredMetadataTooLargeErrorMsg, labels, structuredMetadataSizeBytes, vCtx.maxStructuredMetadataSize)
}
if maxCount := vCtx.maxStructuredMetadataCount; maxCount != 0 && structuredMetadataCount > maxCount {
validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID).Add(float64(len(entry.Line)))
+ validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID).Add(entrySize)
if v.usageTracker != nil {
- v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooMany, labels, float64(len(entry.Line)))
+ v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooMany, labels, entrySize)
}
return fmt.Errorf(validation.StructuredMetadataTooManyErrorMsg, labels, structuredMetadataCount, vCtx.maxStructuredMetadataCount)
}
@@ -207,10 +205,7 @@ func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time) (b
}
func updateMetrics(reason, userID string, stream logproto.Stream) {
- validation.DiscardedSamples.WithLabelValues(reason, userID).Inc()
- bytes := 0
- for _, e := range stream.Entries {
- bytes += len(e.Line)
- }
+ validation.DiscardedSamples.WithLabelValues(reason, userID).Add(float64(len(stream.Entries)))
+ bytes := util.EntriesTotalSize(stream.Entries)
validation.DiscardedBytes.WithLabelValues(reason, userID).Add(float64(bytes))
}
diff --git a/pkg/indexgateway/gateway.go b/pkg/indexgateway/gateway.go
index 92d476d496673..a053c8154bde2 100644
--- a/pkg/indexgateway/gateway.go
+++ b/pkg/indexgateway/gateway.go
@@ -22,7 +22,6 @@ import (
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
- "github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/querier/plan"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@@ -209,6 +208,8 @@ func buildResponses(query seriesindex.Query, batch seriesindex.ReadBatchResult,
}
func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequest) (result *logproto.GetChunkRefResponse, err error) {
+ logger := util_log.WithContext(ctx, g.log)
+
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
@@ -234,6 +235,9 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
}
initialChunkCount := len(result.Refs)
+ result.Stats.TotalChunks = int64(initialChunkCount)
+ result.Stats.PostFilterChunks = int64(initialChunkCount) // populate early for error responses
+
defer func() {
if err == nil {
g.metrics.preFilterChunks.WithLabelValues(routeChunkRefs).Observe(float64(initialChunkCount))
@@ -259,7 +263,8 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
}
result.Refs = chunkRefs
- level.Info(g.log).Log("msg", "return filtered chunk refs", "unfiltered", initialChunkCount, "filtered", len(result.Refs))
+ level.Info(logger).Log("msg", "return filtered chunk refs", "unfiltered", initialChunkCount, "filtered", len(result.Refs))
+ result.Stats.PostFilterChunks = int64(len(result.Refs))
return result, nil
}
@@ -484,16 +489,7 @@ func (g *Gateway) boundedShards(
g.metrics.preFilterChunks.WithLabelValues(routeShards).Observe(float64(ct))
g.metrics.postFilterChunks.WithLabelValues(routeShards).Observe(float64(len(filtered)))
- statistics := stats.Result{
- Index: stats.Index{
- TotalChunks: int64(ct),
- PostFilterChunks: int64(len(filtered)),
- },
- }
-
- resp := &logproto.ShardsResponse{
- Statistics: statistics,
- }
+ resp := &logproto.ShardsResponse{}
// Edge case: if there are no chunks after filtering, we still need to return a single shard
if len(filtered) == 0 {
@@ -528,8 +524,8 @@ func (g *Gateway) boundedShards(
ms := syntax.MatchersExpr{Mts: p.Matchers}
level.Debug(logger).Log(
"msg", "send shards response",
- "total_chunks", statistics.Index.TotalChunks,
- "post_filter_chunks", statistics.Index.PostFilterChunks,
+ "total_chunks", ct,
+ "post_filter_chunks", len(filtered),
"shards", len(resp.Shards),
"query", req.Query,
"target_bytes_per_shard", datasize.ByteSize(req.TargetBytesPerShard).HumanReadable(),
diff --git a/pkg/ingester-rf1/instance.go b/pkg/ingester-rf1/instance.go
index 0444475f7a6bf..1d6e89d7f4f65 100644
--- a/pkg/ingester-rf1/instance.go
+++ b/pkg/ingester-rf1/instance.go
@@ -24,6 +24,7 @@ import (
"github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/wal"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
util_log "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/validation"
@@ -269,10 +270,7 @@ func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logp
}
validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries)))
- bytes := 0
- for _, e := range pushReqStream.Entries {
- bytes += len(e.Line)
- }
+ bytes := util.EntriesTotalSize(pushReqStream.Entries)
validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
if i.customStreamsTracker != nil {
i.customStreamsTracker.DiscardedBytesAdd(ctx, i.instanceID, validation.StreamLimit, labels, float64(bytes))
diff --git a/pkg/ingester-rf1/objstore/storage.go b/pkg/ingester-rf1/objstore/storage.go
index 9937ee20ee818..1192a9039be78 100644
--- a/pkg/ingester-rf1/objstore/storage.go
+++ b/pkg/ingester-rf1/objstore/storage.go
@@ -40,7 +40,7 @@ func New(
return periodicConfigs[i].From.Time.Before(periodicConfigs[j].From.Time)
})
for _, periodicConfig := range periodicConfigs {
- objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics)
+ objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, "storage-rf1", storageConfig, clientMetrics)
if err != nil {
return nil, fmt.Errorf("creating object client for period %s: %w ", periodicConfig.From, err)
}
diff --git a/pkg/ingester-rf1/stream.go b/pkg/ingester-rf1/stream.go
index 8913e206a7c2a..9e92677643a40 100644
--- a/pkg/ingester-rf1/stream.go
+++ b/pkg/ingester-rf1/stream.go
@@ -18,6 +18,7 @@ import (
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/wal"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/flagext"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -245,15 +246,15 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
continue
}
- lineBytes := len(entries[i].Line)
- totalBytes += lineBytes
+ entryBytes := util.EntryTotalSize(&entries[i])
+ totalBytes += entryBytes
now := time.Now()
- if !rateLimitWholeStream && !s.limiter.AllowN(now, len(entries[i].Line)) {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(lineBytes)}})
+ if !rateLimitWholeStream && !s.limiter.AllowN(now, entryBytes) {
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(entryBytes)}})
s.writeFailures.Log(s.tenant, failedEntriesWithError[len(failedEntriesWithError)-1].e)
rateLimitedSamples++
- rateLimitedBytes += lineBytes
+ rateLimitedBytes += entryBytes
continue
}
@@ -263,11 +264,11 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(entries[i].Timestamp, cutoff)})
s.writeFailures.Log(s.tenant, fmt.Errorf("%w for stream %s", failedEntriesWithError[len(failedEntriesWithError)-1].e, s.labels))
outOfOrderSamples++
- outOfOrderBytes += lineBytes
+ outOfOrderBytes += entryBytes
continue
}
- validBytes += lineBytes
+ validBytes += entryBytes
lastLine.ts = entries[i].Timestamp
lastLine.content = entries[i].Line
@@ -287,8 +288,9 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
rateLimitedSamples = len(toStore)
failedEntriesWithError = make([]entryWithError, 0, len(toStore))
for i := 0; i < len(toStore); i++ {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{toStore[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(len(toStore[i].Line))}})
- rateLimitedBytes += len(toStore[i].Line)
+ entryTotalSize := util.EntryTotalSize(toStore[i])
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{toStore[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(entryTotalSize)}})
+ rateLimitedBytes += entryTotalSize
}
}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index adf0cd7b332f5..17089efbbf63b 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -611,6 +611,10 @@ func (i *Ingester) starting(ctx context.Context) (err error) {
i.setPrepareShutdown()
}
+ // start our flush loop: this needs to start before the partition-reader in order for chunks to be shipped in the case of Kafka catching up.
+ i.loopDone.Add(1)
+ go i.loop()
+
// When kafka ingestion is enabled, we have to make sure that reader catches up replaying the partition
// BEFORE the ingester ring lifecycler is started, because once the ingester ring lifecycler will start
// it will switch the ingester state in the ring to ACTIVE.
@@ -646,9 +650,7 @@ func (i *Ingester) starting(ctx context.Context) (err error) {
return fmt.Errorf("failed to start partition ring lifecycler: %w", err)
}
}
- // start our loop
- i.loopDone.Add(1)
- go i.loop()
+
return nil
}
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 685fa90a8a762..62182c6b9daef 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -336,10 +336,7 @@ func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logp
}
validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries)))
- bytes := 0
- for _, e := range pushReqStream.Entries {
- bytes += len(e.Line)
- }
+ bytes := util.EntriesTotalSize(pushReqStream.Entries)
validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
if i.customStreamsTracker != nil {
i.customStreamsTracker.DiscardedBytesAdd(ctx, i.instanceID, validation.StreamLimit, labels, float64(bytes))
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index d1577b1d2fffe..6ae03e1a4f602 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -9,8 +9,6 @@ import (
"sync"
"time"
- "github.com/grafana/loki/v3/pkg/runtime"
-
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
@@ -25,6 +23,8 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
+ "github.com/grafana/loki/v3/pkg/runtime"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/flagext"
util_log "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/validation"
@@ -346,7 +346,7 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
if chunkenc.IsOutOfOrderErr(err) {
s.writeFailures.Log(s.tenant, err)
outOfOrderSamples++
- outOfOrderBytes += len(entries[i].Line)
+ outOfOrderBytes += util.EntryTotalSize(&entries[i])
}
continue
}
@@ -409,15 +409,15 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
continue
}
- lineBytes := len(entries[i].Line)
- totalBytes += lineBytes
+ entryBytes := util.EntryTotalSize(&entries[i])
+ totalBytes += entryBytes
now := time.Now()
- if !rateLimitWholeStream && !s.limiter.AllowN(now, len(entries[i].Line)) {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(lineBytes)}})
+ if !rateLimitWholeStream && !s.limiter.AllowN(now, entryBytes) {
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(entryBytes)}})
s.writeFailures.Log(s.tenant, failedEntriesWithError[len(failedEntriesWithError)-1].e)
rateLimitedSamples++
- rateLimitedBytes += lineBytes
+ rateLimitedBytes += entryBytes
continue
}
@@ -427,11 +427,11 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(entries[i].Timestamp, cutoff)})
s.writeFailures.Log(s.tenant, fmt.Errorf("%w for stream %s", failedEntriesWithError[len(failedEntriesWithError)-1].e, s.labels))
outOfOrderSamples++
- outOfOrderBytes += lineBytes
+ outOfOrderBytes += entryBytes
continue
}
- validBytes += lineBytes
+ validBytes += entryBytes
lastLine.ts = entries[i].Timestamp
lastLine.content = entries[i].Line
@@ -451,8 +451,15 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
rateLimitedSamples = len(toStore)
failedEntriesWithError = make([]entryWithError, 0, len(toStore))
for i := 0; i < len(toStore); i++ {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&toStore[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(len(toStore[i].Line))}})
- rateLimitedBytes += len(toStore[i].Line)
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{
+ &toStore[i],
+ &validation.ErrStreamRateLimit{
+ RateLimit: flagext.ByteSize(limit),
+ Labels: s.labelsString,
+ Bytes: flagext.ByteSize(util.EntryTotalSize(&toStore[i])),
+ },
+ })
+ rateLimitedBytes += util.EntryTotalSize(&toStore[i])
}
// Log the only last error to the write failures manager.
diff --git a/pkg/kafka/logger.go b/pkg/kafka/client/logger.go
similarity index 98%
rename from pkg/kafka/logger.go
rename to pkg/kafka/client/logger.go
index e055094a4163b..3be96839e1205 100644
--- a/pkg/kafka/logger.go
+++ b/pkg/kafka/client/logger.go
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: AGPL-3.0-only
-package kafka
+package client
import (
"github.com/go-kit/log"
diff --git a/pkg/kafka/reader_client.go b/pkg/kafka/client/reader_client.go
similarity index 51%
rename from pkg/kafka/reader_client.go
rename to pkg/kafka/client/reader_client.go
index 9237686fee609..e8bbb2da8c86a 100644
--- a/pkg/kafka/reader_client.go
+++ b/pkg/kafka/client/reader_client.go
@@ -1,19 +1,25 @@
// SPDX-License-Identifier: AGPL-3.0-only
-package kafka
+package client
import (
+ "context"
+ "fmt"
"time"
"github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/twmb/franz-go/pkg/kadm"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/plugin/kprom"
+
+ "github.com/grafana/loki/v3/pkg/kafka"
)
// NewReaderClient returns the kgo.Client that should be used by the Reader.
-func NewReaderClient(kafkaCfg Config, metrics *kprom.Metrics, logger log.Logger, opts ...kgo.Opt) (*kgo.Client, error) {
+func NewReaderClient(kafkaCfg kafka.Config, metrics *kprom.Metrics, logger log.Logger, opts ...kgo.Opt) (*kgo.Client, error) {
const fetchMaxBytes = 100_000_000
opts = append(opts, commonKafkaClientOptions(kafkaCfg, metrics, logger)...)
@@ -33,7 +39,7 @@ func NewReaderClient(kafkaCfg Config, metrics *kprom.Metrics, logger log.Logger,
return nil, errors.Wrap(err, "creating kafka client")
}
if kafkaCfg.AutoCreateTopicEnabled {
- kafkaCfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(logger)
+ setDefaultNumberOfPartitionsForAutocreatedTopics(kafkaCfg, client, logger)
}
return client, nil
}
@@ -44,3 +50,29 @@ func NewReaderClientMetrics(component string, reg prometheus.Registerer) *kprom.
// Do not export the client ID, because we use it to specify options to the backend.
kprom.FetchAndProduceDetail(kprom.Batches, kprom.Records, kprom.CompressedBytes, kprom.UncompressedBytes))
}
+
+// setDefaultNumberOfPartitionsForAutocreatedTopics tries to set num.partitions config option on brokers.
+// This is best-effort, if setting the option fails, error is logged, but not returned.
+func setDefaultNumberOfPartitionsForAutocreatedTopics(cfg kafka.Config, cl *kgo.Client, logger log.Logger) {
+ if cfg.AutoCreateTopicDefaultPartitions <= 0 {
+ return
+ }
+
+ // Note: this client doesn't get closed because it is owned by the caller
+ adm := kadm.NewClient(cl)
+
+ defaultNumberOfPartitions := fmt.Sprintf("%d", cfg.AutoCreateTopicDefaultPartitions)
+ _, err := adm.AlterBrokerConfigsState(context.Background(), []kadm.AlterConfig{
+ {
+ Op: kadm.SetConfig,
+ Name: "num.partitions",
+ Value: &defaultNumberOfPartitions,
+ },
+ })
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to alter default number of partitions", "err", err)
+ return
+ }
+
+ level.Info(logger).Log("msg", "configured Kafka-wide default number of partitions for auto-created topics (num.partitions)", "value", cfg.AutoCreateTopicDefaultPartitions)
+}
diff --git a/pkg/kafka/client/reader_client_test.go b/pkg/kafka/client/reader_client_test.go
new file mode 100644
index 0000000000000..90980ad0e9128
--- /dev/null
+++ b/pkg/kafka/client/reader_client_test.go
@@ -0,0 +1,104 @@
+package client
+
+import (
+ "context"
+ "testing"
+
+ "github.com/go-kit/log"
+ "github.com/grafana/dskit/flagext"
+ "github.com/stretchr/testify/require"
+ "github.com/twmb/franz-go/pkg/kfake"
+ "github.com/twmb/franz-go/pkg/kgo"
+ "github.com/twmb/franz-go/pkg/kmsg"
+
+ "github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/testkafka"
+)
+
+func TestNewReaderClient(t *testing.T) {
+ _, addr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, 1, "test", kfake.EnableSASL(), kfake.Superuser("PLAIN", "user", "password"))
+
+ tests := []struct {
+ name string
+ config kafka.Config
+ wantErr bool
+ }{
+ {
+ name: "valid config",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ SASLUsername: "user",
+ SASLPassword: flagext.SecretWithValue("password"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "wrong password",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ SASLUsername: "user",
+ SASLPassword: flagext.SecretWithValue("wrong wrong wrong"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "wrong username",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ SASLUsername: "wrong wrong wrong",
+ SASLPassword: flagext.SecretWithValue("password"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client, err := NewReaderClient(tt.config, nil, nil)
+ require.NoError(t, err)
+
+ err = client.Ping(context.Background())
+ if tt.wantErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestSetDefaultNumberOfPartitionsForAutocreatedTopics(t *testing.T) {
+ cluster, err := kfake.NewCluster(kfake.NumBrokers(1))
+ require.NoError(t, err)
+ t.Cleanup(cluster.Close)
+
+ addrs := cluster.ListenAddrs()
+ require.Len(t, addrs, 1)
+
+ cfg := kafka.Config{
+ Address: addrs[0],
+ AutoCreateTopicDefaultPartitions: 100,
+ }
+
+ cluster.ControlKey(kmsg.AlterConfigs.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) {
+ r := request.(*kmsg.AlterConfigsRequest)
+
+ require.Len(t, r.Resources, 1)
+ res := r.Resources[0]
+ require.Equal(t, kmsg.ConfigResourceTypeBroker, res.ResourceType)
+ require.Len(t, res.Configs, 1)
+ cfg := res.Configs[0]
+ require.Equal(t, "num.partitions", cfg.Name)
+ require.NotNil(t, *cfg.Value)
+ require.Equal(t, "100", *cfg.Value)
+
+ return &kmsg.AlterConfigsResponse{}, nil, true
+ })
+
+ client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, log.NewNopLogger())...)
+ require.NoError(t, err)
+
+ setDefaultNumberOfPartitionsForAutocreatedTopics(cfg, client, log.NewNopLogger())
+}
diff --git a/pkg/kafka/writer_client.go b/pkg/kafka/client/writer_client.go
similarity index 90%
rename from pkg/kafka/writer_client.go
rename to pkg/kafka/client/writer_client.go
index 59fefda31d19b..1493e17f51686 100644
--- a/pkg/kafka/writer_client.go
+++ b/pkg/kafka/client/writer_client.go
@@ -1,4 +1,4 @@
-package kafka
+package client
import (
"context"
@@ -13,20 +13,30 @@ import (
"github.com/twmb/franz-go/pkg/kerr"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
+ "github.com/twmb/franz-go/pkg/sasl/plain"
"github.com/twmb/franz-go/plugin/kotel"
"github.com/twmb/franz-go/plugin/kprom"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
+ "github.com/grafana/loki/v3/pkg/kafka"
"github.com/grafana/loki/v3/pkg/util/constants"
)
+var (
+ // writerRequestTimeoutOverhead is the overhead applied by the Writer to every Kafka timeout.
+ // You can think about this overhead as an extra time for requests sitting in the client's buffer
+ // before being sent on the wire and the actual time it takes to send it over the network and
+ // start being processed by Kafka.
+ writerRequestTimeoutOverhead = 2 * time.Second
+)
+
// NewWriterClient returns the kgo.Client that should be used by the Writer.
//
// The input prometheus.Registerer must be wrapped with a prefix (the names of metrics
// registered don't have a prefix).
-func NewWriterClient(kafkaCfg Config, maxInflightProduceRequests int, logger log.Logger, reg prometheus.Registerer) (*kgo.Client, error) {
+func NewWriterClient(kafkaCfg kafka.Config, maxInflightProduceRequests int, logger log.Logger, reg prometheus.Registerer) (*kgo.Client, error) {
// Do not export the client ID, because we use it to specify options to the backend.
metrics := kprom.NewMetrics(
"", // No prefix. We expect the input prometheus.Registered to be wrapped with a prefix.
@@ -42,7 +52,7 @@ func NewWriterClient(kafkaCfg Config, maxInflightProduceRequests int, logger log
kgo.RecordPartitioner(kgo.ManualPartitioner()),
// Set the upper bounds the size of a record batch.
- kgo.ProducerBatchMaxBytes(producerBatchMaxBytes),
+ kgo.ProducerBatchMaxBytes(kafka.ProducerBatchMaxBytes),
// By default, the Kafka client allows 1 Produce in-flight request per broker. Disabling write idempotency
// (which we don't need), we can increase the max number of in-flight Produce requests per broker. A higher
@@ -81,10 +91,14 @@ func NewWriterClient(kafkaCfg Config, maxInflightProduceRequests int, logger log
kgo.MaxBufferedRecords(math.MaxInt), // Use a high value to set it as unlimited, because the client doesn't support "0 as unlimited".
kgo.MaxBufferedBytes(0),
)
+ client, err := kgo.NewClient(opts...)
+ if err != nil {
+ return nil, err
+ }
if kafkaCfg.AutoCreateTopicEnabled {
- kafkaCfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(logger)
+ setDefaultNumberOfPartitionsForAutocreatedTopics(kafkaCfg, client, logger)
}
- return kgo.NewClient(opts...)
+ return client, nil
}
type onlySampledTraces struct {
@@ -99,7 +113,7 @@ func (o onlySampledTraces) Inject(ctx context.Context, carrier propagation.TextM
o.TextMapPropagator.Inject(ctx, carrier)
}
-func commonKafkaClientOptions(cfg Config, metrics *kprom.Metrics, logger log.Logger) []kgo.Opt {
+func commonKafkaClientOptions(cfg kafka.Config, metrics *kprom.Metrics, logger log.Logger) []kgo.Opt {
opts := []kgo.Opt{
kgo.ClientID(cfg.ClientID),
kgo.SeedBrokers(cfg.Address),
@@ -139,6 +153,16 @@ func commonKafkaClientOptions(cfg Config, metrics *kprom.Metrics, logger log.Log
}),
}
+ // SASL plain auth.
+ if cfg.SASLUsername != "" && cfg.SASLPassword.String() != "" {
+ opts = append(opts, kgo.SASL(plain.Plain(func(_ context.Context) (plain.Auth, error) {
+ return plain.Auth{
+ User: cfg.SASLUsername,
+ Pass: cfg.SASLPassword.String(),
+ }, nil
+ })))
+ }
+
if cfg.AutoCreateTopicEnabled {
opts = append(opts, kgo.AllowAutoTopicCreation())
}
diff --git a/pkg/kafka/client/writer_client_test.go b/pkg/kafka/client/writer_client_test.go
new file mode 100644
index 0000000000000..4feb782ffe639
--- /dev/null
+++ b/pkg/kafka/client/writer_client_test.go
@@ -0,0 +1,71 @@
+package client
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/grafana/dskit/flagext"
+ "github.com/stretchr/testify/require"
+ "github.com/twmb/franz-go/pkg/kfake"
+
+ "github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/testkafka"
+)
+
+func TestNewWriterClient(t *testing.T) {
+ _, addr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, 1, "test", kfake.EnableSASL(), kfake.Superuser("PLAIN", "user", "password"))
+
+ tests := []struct {
+ name string
+ config kafka.Config
+ wantErr bool
+ }{
+ {
+ name: "valid config",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ WriteTimeout: time.Second,
+ SASLUsername: "user",
+ SASLPassword: flagext.SecretWithValue("password"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "wrong password",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ WriteTimeout: time.Second,
+ SASLUsername: "user",
+ SASLPassword: flagext.SecretWithValue("wrong wrong wrong"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "wrong username",
+ config: kafka.Config{
+ Address: addr,
+ Topic: "abcd",
+ WriteTimeout: time.Second,
+ SASLUsername: "wrong wrong wrong",
+ SASLPassword: flagext.SecretWithValue("password"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client, err := NewWriterClient(tt.config, 10, nil, nil)
+ require.NoError(t, err)
+
+ err = client.Ping(context.Background())
+ if tt.wantErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/pkg/kafka/config.go b/pkg/kafka/config.go
index 13cfb618cfdb9..09008bec93411 100644
--- a/pkg/kafka/config.go
+++ b/pkg/kafka/config.go
@@ -1,7 +1,6 @@
package kafka
import (
- "context"
"errors"
"flag"
"fmt"
@@ -9,10 +8,7 @@ import (
"strings"
"time"
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/twmb/franz-go/pkg/kadm"
- "github.com/twmb/franz-go/pkg/kgo"
+ "github.com/grafana/dskit/flagext"
)
const (
@@ -21,29 +17,24 @@ const (
consumeFromEnd = "end"
consumeFromTimestamp = "timestamp"
- // writerRequestTimeoutOverhead is the overhead applied by the Writer to every Kafka timeout.
- // You can think about this overhead as an extra time for requests sitting in the client's buffer
- // before being sent on the wire and the actual time it takes to send it over the network and
- // start being processed by Kafka.
- writerRequestTimeoutOverhead = 2 * time.Second
-
- // producerBatchMaxBytes is the max allowed size of a batch of Kafka records.
- producerBatchMaxBytes = 16_000_000
+ // ProducerBatchMaxBytes is the max allowed size of a batch of Kafka records.
+ ProducerBatchMaxBytes = 16_000_000
// maxProducerRecordDataBytesLimit is the max allowed size of a single record data. Given we have a limit
- // on the max batch size (producerBatchMaxBytes), a Kafka record data can't be bigger than the batch size
+ // on the max batch size (ProducerBatchMaxBytes), a Kafka record data can't be bigger than the batch size
// minus some overhead required to serialise the batch and the record itself. We use 16KB as such overhead
// in the worst case scenario, which is expected to be way above the actual one.
- maxProducerRecordDataBytesLimit = producerBatchMaxBytes - 16384
+ maxProducerRecordDataBytesLimit = ProducerBatchMaxBytes - 16384
minProducerRecordDataBytesLimit = 1024 * 1024
)
var (
- ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured")
- ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured")
- ErrInconsistentConsumerLagAtStartup = errors.New("the target and max consumer lag at startup must be either both set to 0 or to a value greater than 0")
- ErrInvalidMaxConsumerLagAtStartup = errors.New("the configured max consumer lag at startup must greater or equal than the configured target consumer lag")
- ErrInvalidProducerMaxRecordSizeBytes = fmt.Errorf("the configured producer max record size bytes must be a value between %d and %d", minProducerRecordDataBytesLimit, maxProducerRecordDataBytesLimit)
+ ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured")
+ ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured")
+ ErrInconsistentConsumerLagAtStartup = errors.New("the target and max consumer lag at startup must be either both set to 0 or to a value greater than 0")
+ ErrInvalidMaxConsumerLagAtStartup = errors.New("the configured max consumer lag at startup must greater or equal than the configured target consumer lag")
+ ErrInconsistentSASLUsernameAndPassword = errors.New("both sasl username and password must be set")
+ ErrInvalidProducerMaxRecordSizeBytes = fmt.Errorf("the configured producer max record size bytes must be a value between %d and %d", minProducerRecordDataBytesLimit, maxProducerRecordDataBytesLimit)
)
// Config holds the generic config for the Kafka backend.
@@ -54,6 +45,9 @@ type Config struct {
DialTimeout time.Duration `yaml:"dial_timeout"`
WriteTimeout time.Duration `yaml:"write_timeout"`
+ SASLUsername string `yaml:"sasl_username"`
+ SASLPassword flagext.Secret `yaml:"sasl_password"`
+
ConsumerGroup string `yaml:"consumer_group"`
ConsumerGroupOffsetCommitInterval time.Duration `yaml:"consumer_group_offset_commit_interval"`
@@ -80,6 +74,9 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.DurationVar(&cfg.DialTimeout, prefix+".dial-timeout", 2*time.Second, "The maximum time allowed to open a connection to a Kafka broker.")
f.DurationVar(&cfg.WriteTimeout, prefix+".write-timeout", 10*time.Second, "How long to wait for an incoming write request to be successfully committed to the Kafka backend.")
+ f.StringVar(&cfg.SASLUsername, prefix+".sasl-username", "", "The SASL username for authentication to Kafka using the PLAIN mechanism. Both username and password must be set.")
+ f.Var(&cfg.SASLPassword, prefix+".sasl-password", "The SASL password for authentication to Kafka using the PLAIN mechanism. Both username and password must be set.")
+
f.StringVar(&cfg.ConsumerGroup, prefix+".consumer-group", "", "The consumer group used by the consumer to track the last consumed offset. The consumer group must be different for each ingester. If the configured consumer group contains the '' placeholder, it is replaced with the actual partition ID owned by the ingester. When empty (recommended), Mimir uses the ingester instance ID to guarantee uniqueness.")
f.DurationVar(&cfg.ConsumerGroupOffsetCommitInterval, prefix+".consumer-group-offset-commit-interval", time.Second, "How frequently a consumer should commit the consumed offset to Kafka. The last committed offset is used at startup to continue the consumption from where it was left.")
@@ -113,6 +110,10 @@ func (cfg *Config) Validate() error {
return ErrInvalidMaxConsumerLagAtStartup
}
+ if (cfg.SASLUsername == "") != (cfg.SASLPassword.String() == "") {
+ return ErrInconsistentSASLUsernameAndPassword
+ }
+
return nil
}
@@ -124,35 +125,3 @@ func (cfg *Config) GetConsumerGroup(instanceID string, partitionID int32) string
return strings.ReplaceAll(cfg.ConsumerGroup, "", strconv.Itoa(int(partitionID)))
}
-
-// SetDefaultNumberOfPartitionsForAutocreatedTopics tries to set num.partitions config option on brokers.
-// This is best-effort, if setting the option fails, error is logged, but not returned.
-func (cfg Config) SetDefaultNumberOfPartitionsForAutocreatedTopics(logger log.Logger) {
- if cfg.AutoCreateTopicDefaultPartitions <= 0 {
- return
- }
-
- cl, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, logger)...)
- if err != nil {
- level.Error(logger).Log("msg", "failed to create kafka client", "err", err)
- return
- }
-
- adm := kadm.NewClient(cl)
- defer adm.Close()
-
- defaultNumberOfPartitions := fmt.Sprintf("%d", cfg.AutoCreateTopicDefaultPartitions)
- _, err = adm.AlterBrokerConfigsState(context.Background(), []kadm.AlterConfig{
- {
- Op: kadm.SetConfig,
- Name: "num.partitions",
- Value: &defaultNumberOfPartitions,
- },
- })
- if err != nil {
- level.Error(logger).Log("msg", "failed to alter default number of partitions", "err", err)
- return
- }
-
- level.Info(logger).Log("msg", "configured Kafka-wide default number of partitions for auto-created topics (num.partitions)", "value", cfg.AutoCreateTopicDefaultPartitions)
-}
diff --git a/pkg/kafka/config_test.go b/pkg/kafka/config_test.go
index 7c21e38fd141e..87c456f42adc0 100644
--- a/pkg/kafka/config_test.go
+++ b/pkg/kafka/config_test.go
@@ -3,39 +3,37 @@ package kafka
import (
"testing"
- "github.com/go-kit/log"
+ "github.com/grafana/dskit/flagext"
"github.com/stretchr/testify/require"
- "github.com/twmb/franz-go/pkg/kfake"
- "github.com/twmb/franz-go/pkg/kmsg"
)
-func TestSetDefaultNumberOfPartitionsForAutocreatedTopics(t *testing.T) {
- cluster, err := kfake.NewCluster(kfake.NumBrokers(1))
- require.NoError(t, err)
- t.Cleanup(cluster.Close)
-
- addrs := cluster.ListenAddrs()
- require.Len(t, addrs, 1)
-
+func TestBothSASLParamsMustBeSet(t *testing.T) {
cfg := Config{
- Address: addrs[0],
- AutoCreateTopicDefaultPartitions: 100,
+ // Other required params
+ Address: "abcd",
+ Topic: "abcd",
+ ProducerMaxRecordSizeBytes: 1048576,
}
- cluster.ControlKey(kmsg.AlterConfigs.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) {
- r := request.(*kmsg.AlterConfigsRequest)
-
- require.Len(t, r.Resources, 1)
- res := r.Resources[0]
- require.Equal(t, kmsg.ConfigResourceTypeBroker, res.ResourceType)
- require.Len(t, res.Configs, 1)
- cfg := res.Configs[0]
- require.Equal(t, "num.partitions", cfg.Name)
- require.NotNil(t, *cfg.Value)
- require.Equal(t, "100", *cfg.Value)
-
- return &kmsg.AlterConfigsResponse{}, nil, true
- })
+ // No SASL params is valid
+ err := cfg.Validate()
+ require.NoError(t, err)
- cfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(log.NewNopLogger())
+ // Just username is invalid
+ cfg.SASLUsername = "abcd"
+ cfg.SASLPassword = flagext.Secret{}
+ err = cfg.Validate()
+ require.Error(t, err)
+
+ // Just password is invalid
+ cfg.SASLUsername = ""
+ cfg.SASLPassword = flagext.SecretWithValue("abcd")
+ err = cfg.Validate()
+ require.Error(t, err)
+
+ // Both username and password is valid
+ cfg.SASLUsername = "abcd"
+ cfg.SASLPassword = flagext.SecretWithValue("abcd")
+ err = cfg.Validate()
+ require.NoError(t, err)
}
diff --git a/pkg/kafka/partition/committer_test.go b/pkg/kafka/partition/committer_test.go
index 9ef02f910e5d0..1739986cd66c8 100644
--- a/pkg/kafka/partition/committer_test.go
+++ b/pkg/kafka/partition/committer_test.go
@@ -14,7 +14,7 @@ import (
"github.com/prometheus/client_golang/prometheus/testutil"
- "github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/client"
"github.com/grafana/loki/v3/pkg/kafka/testkafka"
)
@@ -24,7 +24,7 @@ func TestPartitionCommitter(t *testing.T) {
topicName := "test-topic"
_, kafkaCfg := testkafka.CreateCluster(t, numPartitions, topicName)
- client, err := kafka.NewReaderClient(kafkaCfg, kprom.NewMetrics("foo"), log.NewNopLogger())
+ client, err := client.NewReaderClient(kafkaCfg, kprom.NewMetrics("foo"), log.NewNopLogger())
require.NoError(t, err)
// Create a Kafka admin client
diff --git a/pkg/kafka/partition/reader.go b/pkg/kafka/partition/reader.go
index e07a65f8a0f15..d90b028e8af81 100644
--- a/pkg/kafka/partition/reader.go
+++ b/pkg/kafka/partition/reader.go
@@ -22,6 +22,7 @@ import (
"github.com/twmb/franz-go/plugin/kprom"
"github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/client"
)
var errWaitTargetLagDeadlineExceeded = errors.New("waiting for target lag deadline exceeded")
@@ -94,7 +95,7 @@ func NewReader(
// This method is called when the PartitionReader service starts.
func (p *Reader) start(ctx context.Context) error {
var err error
- p.client, err = kafka.NewReaderClient(p.kafkaCfg, p.metrics.kprom, p.logger)
+ p.client, err = client.NewReaderClient(p.kafkaCfg, p.metrics.kprom, p.logger)
if err != nil {
return errors.Wrap(err, "creating kafka reader client")
}
@@ -476,7 +477,7 @@ func (p *Reader) recordFetchesMetrics(fetches kgo.Fetches) {
fetches.EachRecord(func(record *kgo.Record) {
numRecords++
delay := now.Sub(record.Timestamp).Seconds()
- if p.lastProcessedOffset == -1 {
+ if p.Service.State() == services.Starting {
p.metrics.receiveDelayWhenStarting.Observe(delay)
} else {
p.metrics.receiveDelayWhenRunning.Observe(delay)
@@ -539,7 +540,7 @@ func newReaderMetrics(reg prometheus.Registerer) readerMetrics {
return readerMetrics{
receiveDelayWhenStarting: receiveDelay.WithLabelValues("starting"),
receiveDelayWhenRunning: receiveDelay.WithLabelValues("running"),
- kprom: kafka.NewReaderClientMetrics("partition-reader", reg),
+ kprom: client.NewReaderClientMetrics("partition-reader", reg),
fetchWaitDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "loki_ingest_storage_reader_records_batch_wait_duration_seconds",
Help: "How long a consumer spent waiting for a batch of records from the Kafka client. If fetching is faster than processing, then this will be close to 0.",
diff --git a/pkg/kafka/partition/reader_test.go b/pkg/kafka/partition/reader_test.go
index 8d548c8312411..dfd653de78e3d 100644
--- a/pkg/kafka/partition/reader_test.go
+++ b/pkg/kafka/partition/reader_test.go
@@ -17,6 +17,7 @@ import (
"github.com/twmb/franz-go/pkg/kgo"
"github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/client"
"github.com/grafana/loki/v3/pkg/kafka/testkafka"
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -58,7 +59,7 @@ func (m *mockConsumer) Flush(ctx context.Context) error {
}
func TestPartitionReader_BasicFunctionality(t *testing.T) {
- _, kafkaCfg := testkafka.CreateCluster(t, 1, "test-topic")
+ _, kafkaCfg := testkafka.CreateCluster(t, 1, "test")
consumer := newMockConsumer()
consumerFactory := func(_ Committer) (Consumer, error) {
@@ -67,7 +68,7 @@ func TestPartitionReader_BasicFunctionality(t *testing.T) {
partitionReader, err := NewReader(kafkaCfg, 0, "test-consumer-group", consumerFactory, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
- producer, err := kafka.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
+ producer, err := client.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), partitionReader)
@@ -82,8 +83,8 @@ func TestPartitionReader_BasicFunctionality(t *testing.T) {
require.NoError(t, err)
require.Len(t, records, 1)
- producer.ProduceSync(context.Background(), records...)
- producer.ProduceSync(context.Background(), records...)
+ require.NoError(t, producer.ProduceSync(context.Background(), records...).FirstErr())
+ require.NoError(t, producer.ProduceSync(context.Background(), records...).FirstErr())
// Wait for records to be processed
assert.Eventually(t, func() bool {
@@ -121,7 +122,7 @@ func TestPartitionReader_ProcessCatchUpAtStartup(t *testing.T) {
partitionReader, err := NewReader(kafkaCfg, 0, "test-consumer-group", consumerFactory, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
- producer, err := kafka.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
+ producer, err := client.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
stream := logproto.Stream{
@@ -175,11 +176,11 @@ func TestPartitionReader_ProcessCommits(t *testing.T) {
partitionID := int32(0)
partitionReader, err := NewReader(kafkaCfg, partitionID, "test-consumer-group", consumerFactory, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
- producer, err := kafka.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
+ producer, err := client.NewWriterClient(kafkaCfg, 100, log.NewNopLogger(), prometheus.NewRegistry())
require.NoError(t, err)
// Init the client: This usually happens in "start" but we want to manage our own lifecycle for this test.
- partitionReader.client, err = kafka.NewReaderClient(kafkaCfg, nil, log.NewNopLogger(),
+ partitionReader.client, err = client.NewReaderClient(kafkaCfg, nil, log.NewNopLogger(),
kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{
kafkaCfg.Topic: {partitionID: kgo.NewOffset().AtStart()},
}),
diff --git a/pkg/kafka/testkafka/cluster.go b/pkg/kafka/testkafka/cluster.go
index cc5847c2bfd35..c70e3da4a71cb 100644
--- a/pkg/kafka/testkafka/cluster.go
+++ b/pkg/kafka/testkafka/cluster.go
@@ -16,8 +16,8 @@ import (
)
// CreateCluster returns a fake Kafka cluster for unit testing.
-func CreateCluster(t testing.TB, numPartitions int32, topicName string) (*kfake.Cluster, kafka.Config) {
- cluster, addr := CreateClusterWithoutCustomConsumerGroupsSupport(t, numPartitions, topicName)
+func CreateCluster(t testing.TB, numPartitions int32, topicName string, opts ...kfake.Opt) (*kfake.Cluster, kafka.Config) {
+ cluster, addr := CreateClusterWithoutCustomConsumerGroupsSupport(t, numPartitions, topicName, opts...)
addSupportForConsumerGroups(t, cluster, topicName, numPartitions)
return cluster, createTestKafkaConfig(addr, topicName)
@@ -34,8 +34,16 @@ func createTestKafkaConfig(clusterAddr, topicName string) kafka.Config {
return cfg
}
-func CreateClusterWithoutCustomConsumerGroupsSupport(t testing.TB, numPartitions int32, topicName string) (*kfake.Cluster, string) {
- cluster, err := kfake.NewCluster(kfake.NumBrokers(1), kfake.SeedTopics(numPartitions, topicName))
+func CreateClusterWithoutCustomConsumerGroupsSupport(t testing.TB, numPartitions int32, topicName string, opts ...kfake.Opt) (*kfake.Cluster, string) {
+ cfg := []kfake.Opt{
+ kfake.NumBrokers(1),
+ kfake.SeedTopics(numPartitions, topicName),
+ }
+
+ // Apply options.
+ cfg = append(cfg, opts...)
+
+ cluster, err := kfake.NewCluster(cfg...)
require.NoError(t, err)
t.Cleanup(cluster.Close)
diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go
index f2d42b353f969..1ffbfdedf0157 100644
--- a/pkg/logcli/client/client.go
+++ b/pkg/logcli/client/client.go
@@ -88,6 +88,7 @@ type DefaultClient struct {
AuthHeader string
ProxyURL string
BackoffConfig BackoffConfig
+ Compression bool
}
// Query uses the /api/v1/query endpoint to execute an instant query
@@ -320,6 +321,16 @@ func (c *DefaultClient) doRequest(path, query string, quiet bool, out interface{
if c.Tripperware != nil {
client.Transport = c.Tripperware(client.Transport)
}
+ if c.Compression {
+ // NewClientFromConfig() above returns an http.Client that uses a transport which
+ // has compression explicitly disabled. Here we re-enable it. If the caller
+ // defines a custom Tripperware that isn't an http.Transport then this won't work,
+ // but in that case they control the transport anyway and can configure
+ // compression that way.
+ if transport, ok := client.Transport.(*http.Transport); ok {
+ transport.DisableCompression = false
+ }
+ }
var resp *http.Response
diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go
index 268def0bab685..ba21dade985ab 100644
--- a/pkg/logcli/query/query.go
+++ b/pkg/logcli/query/query.go
@@ -538,15 +538,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
}
func GetObjectClient(store string, conf loki.Config, cm storage.ClientMetrics) (chunk.ObjectClient, error) {
- oc, err := storage.NewObjectClient(
- store,
- conf.StorageConfig,
- cm,
- )
- if err != nil {
- return nil, err
- }
- return oc, nil
+ return storage.NewObjectClient(store, "logcli-query", conf.StorageConfig, cm)
}
var errNotExists = stdErrors.New("doesn't exist")
diff --git a/pkg/logcli/query/tail.go b/pkg/logcli/query/tail.go
index b65e546b904e3..eb95bc848e1de 100644
--- a/pkg/logcli/query/tail.go
+++ b/pkg/logcli/query/tail.go
@@ -45,10 +45,10 @@ func (q *Query) TailQuery(delayFor time.Duration, c client.Client, out output.Lo
log.Println("Print only labels key:", color.RedString(strings.Join(q.ShowLabelsKey, ",")))
}
- tailResponse := new(loghttp.TailResponse)
lastReceivedTimestamp := q.Start
for {
+ tailResponse := new(loghttp.TailResponse)
err := unmarshal.ReadTailResponseJSON(tailResponse, conn)
if err != nil {
// Check if the websocket connection closed unexpectedly. If so, retry.
diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go
index 3e654b9c21ef2..584b45a833b71 100644
--- a/pkg/loghttp/push/otlp.go
+++ b/pkg/loghttp/push/otlp.go
@@ -193,7 +193,7 @@ func otlpToLokiPushRequest(ctx context.Context, ld plog.Logs, userID string, ten
stats.StreamLabelsSize += int64(labelsSize(logproto.FromLabelsToLabelAdapters(lbs)))
}
- resourceAttributesAsStructuredMetadataSize := labelsSize(resourceAttributesAsStructuredMetadata)
+ resourceAttributesAsStructuredMetadataSize := loki_util.StructuredMetadataSize(resourceAttributesAsStructuredMetadata)
retentionPeriodForUser := tenantsRetention.RetentionPeriodFor(userID, lbs)
stats.StructuredMetadataBytes[retentionPeriodForUser] += int64(resourceAttributesAsStructuredMetadataSize)
@@ -250,7 +250,7 @@ func otlpToLokiPushRequest(ctx context.Context, ld plog.Logs, userID string, ten
})
}
- scopeAttributesAsStructuredMetadataSize := labelsSize(scopeAttributesAsStructuredMetadata)
+ scopeAttributesAsStructuredMetadataSize := loki_util.StructuredMetadataSize(scopeAttributesAsStructuredMetadata)
stats.StructuredMetadataBytes[retentionPeriodForUser] += int64(scopeAttributesAsStructuredMetadataSize)
if tracker != nil {
tracker.ReceivedBytesAdd(ctx, userID, retentionPeriodForUser, lbs, float64(scopeAttributesAsStructuredMetadataSize))
@@ -276,7 +276,7 @@ func otlpToLokiPushRequest(ctx context.Context, ld plog.Logs, userID string, ten
stream.Entries = append(stream.Entries, entry)
pushRequestsByStream[labelsStr] = stream
- metadataSize := int64(labelsSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize)
+ metadataSize := int64(loki_util.StructuredMetadataSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize)
stats.StructuredMetadataBytes[retentionPeriodForUser] += metadataSize
stats.LogLinesBytes[retentionPeriodForUser] += int64(len(entry.Line))
diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go
index be1d8b34b9f31..ad7dba5a64f90 100644
--- a/pkg/loghttp/push/push.go
+++ b/pkg/loghttp/push/push.go
@@ -283,10 +283,7 @@ func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRe
}
for _, e := range s.Entries {
pushStats.NumLines++
- var entryLabelsSize int64
- for _, l := range e.StructuredMetadata {
- entryLabelsSize += int64(len(l.Name) + len(l.Value))
- }
+ entryLabelsSize := int64(util.StructuredMetadataSize(e.StructuredMetadata))
pushStats.LogLinesBytes[retentionPeriod] += int64(len(e.Line))
pushStats.StructuredMetadataBytes[retentionPeriod] += entryLabelsSize
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index 27b36e7bff093..011d0b28f6c01 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -2009,7 +2009,8 @@ func (m *GetChunkRefRequest) GetMatchers() string {
}
type GetChunkRefResponse struct {
- Refs []*ChunkRef `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty"`
+ Refs []*ChunkRef `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty"`
+ Stats stats.Index `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats"`
}
func (m *GetChunkRefResponse) Reset() { *m = GetChunkRefResponse{} }
@@ -2051,6 +2052,13 @@ func (m *GetChunkRefResponse) GetRefs() []*ChunkRef {
return nil
}
+func (m *GetChunkRefResponse) GetStats() stats.Index {
+ if m != nil {
+ return m.Stats
+ }
+ return stats.Index{}
+}
+
type GetSeriesRequest struct {
From github_com_prometheus_common_model.Time `protobuf:"varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time" json:"from"`
Through github_com_prometheus_common_model.Time `protobuf:"varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time" json:"through"`
@@ -3154,180 +3162,181 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
var fileDescriptor_c28a5f14f1f4c79a = []byte{
- // 2764 bytes of a gzipped FileDescriptorProto
+ // 2774 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0xf5, 0x5c, 0x72, 0x49, 0x91, 0x8f, 0x94, 0x2c, 0x8f, 0x68, 0x9b, 0x90, 0x1d, 0xae, 0x32, 0xf8,
- 0xfd, 0x12, 0x27, 0x76, 0x44, 0xdb, 0x69, 0xd2, 0xc4, 0x69, 0x9a, 0x9a, 0x52, 0xec, 0xd8, 0x51,
- 0x6c, 0x67, 0xe4, 0x38, 0x69, 0xd1, 0x20, 0x58, 0x93, 0x23, 0x72, 0x61, 0x72, 0x97, 0xde, 0x1d,
- 0xc6, 0xe1, 0xad, 0xff, 0x40, 0xd1, 0x00, 0x45, 0xd1, 0xf6, 0x52, 0xa0, 0x40, 0x81, 0x16, 0x05,
- 0x72, 0x29, 0x7a, 0xe8, 0xa1, 0x68, 0x2f, 0x05, 0x9a, 0xde, 0x72, 0x0c, 0x72, 0x60, 0x1b, 0xe5,
- 0x52, 0x08, 0x28, 0x90, 0x53, 0x0b, 0xe4, 0x54, 0xcc, 0xd7, 0xee, 0xec, 0x8a, 0xaa, 0x43, 0xd7,
- 0x45, 0x92, 0x0b, 0x39, 0xf3, 0xe6, 0xcd, 0x9b, 0x79, 0x1f, 0xf3, 0xbe, 0x48, 0x38, 0x3e, 0xba,
- 0xdd, 0x6b, 0x0d, 0x82, 0xde, 0x28, 0x0c, 0x58, 0x10, 0x0f, 0xd6, 0xc5, 0x27, 0x2a, 0xeb, 0xf9,
- 0x6a, 0xbd, 0x17, 0xf4, 0x02, 0x89, 0xc3, 0x47, 0x72, 0x7d, 0xd5, 0xe9, 0x05, 0x41, 0x6f, 0x40,
- 0x5b, 0x62, 0x76, 0x6b, 0xbc, 0xd3, 0x62, 0xde, 0x90, 0x46, 0xcc, 0x1d, 0x8e, 0x14, 0xc2, 0x9a,
- 0xa2, 0x7e, 0x67, 0x30, 0x0c, 0xba, 0x74, 0xd0, 0x8a, 0x98, 0xcb, 0x22, 0xf9, 0xa9, 0x30, 0x56,
- 0x38, 0xc6, 0x68, 0x1c, 0xf5, 0xc5, 0x87, 0x02, 0x9e, 0xe1, 0xc0, 0x88, 0x05, 0xa1, 0xdb, 0xa3,
- 0xad, 0x4e, 0x7f, 0xec, 0xdf, 0x6e, 0x75, 0xdc, 0x4e, 0x9f, 0xb6, 0x42, 0x1a, 0x8d, 0x07, 0x2c,
- 0x92, 0x13, 0x36, 0x19, 0x51, 0x45, 0x06, 0xff, 0xd6, 0x82, 0x23, 0x5b, 0xee, 0x2d, 0x3a, 0xb8,
- 0x11, 0xdc, 0x74, 0x07, 0x63, 0x1a, 0x11, 0x1a, 0x8d, 0x02, 0x3f, 0xa2, 0x68, 0x03, 0x4a, 0x03,
- 0xbe, 0x10, 0x35, 0xac, 0xb5, 0xc2, 0xc9, 0xea, 0xb9, 0x53, 0xeb, 0x31, 0x93, 0x33, 0x37, 0x48,
- 0x68, 0xf4, 0xa2, 0xcf, 0xc2, 0x09, 0x51, 0x5b, 0x57, 0x6f, 0x42, 0xd5, 0x00, 0xa3, 0x65, 0x28,
- 0xdc, 0xa6, 0x93, 0x86, 0xb5, 0x66, 0x9d, 0xac, 0x10, 0x3e, 0x44, 0x67, 0xa1, 0xf8, 0x36, 0x27,
- 0xd3, 0xc8, 0xaf, 0x59, 0x27, 0xab, 0xe7, 0x8e, 0x27, 0x87, 0xbc, 0xe6, 0x7b, 0x77, 0xc6, 0x54,
- 0xec, 0x56, 0x07, 0x49, 0xcc, 0xf3, 0xf9, 0x67, 0x2c, 0x7c, 0x0a, 0x0e, 0xef, 0x5b, 0x47, 0x47,
- 0xa1, 0x24, 0x30, 0xe4, 0x8d, 0x2b, 0x44, 0xcd, 0x70, 0x1d, 0xd0, 0x36, 0x0b, 0xa9, 0x3b, 0x24,
- 0x2e, 0xe3, 0xf7, 0xbd, 0x33, 0xa6, 0x11, 0xc3, 0xaf, 0xc0, 0x4a, 0x0a, 0xaa, 0xd8, 0x7e, 0x1a,
- 0xaa, 0x51, 0x02, 0x56, 0xbc, 0xd7, 0x93, 0x6b, 0x25, 0x7b, 0x88, 0x89, 0x88, 0x7f, 0x66, 0x01,
- 0x24, 0x6b, 0xa8, 0x09, 0x20, 0x57, 0x5f, 0x72, 0xa3, 0xbe, 0x60, 0xd8, 0x26, 0x06, 0x04, 0x9d,
- 0x86, 0xc3, 0xc9, 0xec, 0x6a, 0xb0, 0xdd, 0x77, 0xc3, 0xae, 0x90, 0x81, 0x4d, 0xf6, 0x2f, 0x20,
- 0x04, 0x76, 0xe8, 0x32, 0xda, 0x28, 0xac, 0x59, 0x27, 0x0b, 0x44, 0x8c, 0x39, 0xb7, 0x8c, 0xfa,
- 0xae, 0xcf, 0x1a, 0xb6, 0x10, 0xa7, 0x9a, 0x71, 0x38, 0xb7, 0x08, 0x1a, 0x35, 0x8a, 0x6b, 0xd6,
- 0xc9, 0x45, 0xa2, 0x66, 0xf8, 0x9f, 0x05, 0xa8, 0xbd, 0x3a, 0xa6, 0xe1, 0x44, 0x09, 0x00, 0x35,
- 0xa1, 0x1c, 0xd1, 0x01, 0xed, 0xb0, 0x20, 0x94, 0x1a, 0x69, 0xe7, 0x1b, 0x16, 0x89, 0x61, 0xa8,
- 0x0e, 0xc5, 0x81, 0x37, 0xf4, 0x98, 0xb8, 0xd6, 0x22, 0x91, 0x13, 0x74, 0x1e, 0x8a, 0x11, 0x73,
- 0x43, 0x26, 0xee, 0x52, 0x3d, 0xb7, 0xba, 0x2e, 0x4d, 0x79, 0x5d, 0x9b, 0xf2, 0xfa, 0x0d, 0x6d,
- 0xca, 0xed, 0xf2, 0xfb, 0x53, 0x27, 0xf7, 0xee, 0x5f, 0x1d, 0x8b, 0xc8, 0x2d, 0xe8, 0x69, 0x28,
- 0x50, 0xbf, 0x2b, 0xee, 0xfb, 0x79, 0x77, 0xf2, 0x0d, 0xe8, 0x2c, 0x54, 0xba, 0x5e, 0x48, 0x3b,
- 0xcc, 0x0b, 0x7c, 0xc1, 0xd5, 0xd2, 0xb9, 0x95, 0x44, 0x23, 0x9b, 0x7a, 0x89, 0x24, 0x58, 0xe8,
- 0x34, 0x94, 0x22, 0x2e, 0xba, 0xa8, 0xb1, 0xc0, 0x6d, 0xa1, 0x5d, 0xdf, 0x9b, 0x3a, 0xcb, 0x12,
- 0x72, 0x3a, 0x18, 0x7a, 0x8c, 0x0e, 0x47, 0x6c, 0x42, 0x14, 0x0e, 0x7a, 0x1c, 0x16, 0xba, 0x74,
- 0x40, 0xb9, 0xc2, 0xcb, 0x42, 0xe1, 0xcb, 0x06, 0x79, 0xb1, 0x40, 0x34, 0x02, 0x7a, 0x13, 0xec,
- 0xd1, 0xc0, 0xf5, 0x1b, 0x15, 0xc1, 0xc5, 0x52, 0x82, 0x78, 0x7d, 0xe0, 0xfa, 0xed, 0x67, 0x3f,
- 0x9a, 0x3a, 0x4f, 0xf5, 0x3c, 0xd6, 0x1f, 0xdf, 0x5a, 0xef, 0x04, 0xc3, 0x56, 0x2f, 0x74, 0x77,
- 0x5c, 0xdf, 0x6d, 0x0d, 0x82, 0xdb, 0x5e, 0xeb, 0xed, 0x27, 0x5b, 0xfc, 0x81, 0xde, 0x19, 0xd3,
- 0xd0, 0xa3, 0x61, 0x8b, 0x93, 0x59, 0x17, 0x2a, 0xe1, 0x5b, 0x89, 0x20, 0x8b, 0xae, 0x70, 0xfb,
- 0x0b, 0x42, 0xba, 0xc1, 0x5f, 0x6f, 0xd4, 0x00, 0x71, 0xca, 0xb1, 0xe4, 0x14, 0x01, 0x27, 0x74,
- 0xe7, 0x52, 0x18, 0x8c, 0x47, 0xed, 0x43, 0x7b, 0x53, 0xc7, 0xc4, 0x27, 0xe6, 0xe4, 0x8a, 0x5d,
- 0x2e, 0x2d, 0x2f, 0xe0, 0xf7, 0x0a, 0x80, 0xb6, 0xdd, 0xe1, 0x68, 0x40, 0xe7, 0x52, 0x7f, 0xac,
- 0xe8, 0xfc, 0x7d, 0x2b, 0xba, 0x30, 0xaf, 0xa2, 0x13, 0xad, 0xd9, 0xf3, 0x69, 0xad, 0xf8, 0x79,
- 0xb5, 0x56, 0xfa, 0xd2, 0x6b, 0x0d, 0x37, 0xc0, 0xe6, 0x94, 0xb9, 0xb3, 0x0c, 0xdd, 0xbb, 0x42,
- 0x37, 0x35, 0xc2, 0x87, 0x78, 0x0b, 0x4a, 0x92, 0x2f, 0xb4, 0x9a, 0x55, 0x5e, 0xfa, 0xdd, 0x26,
- 0x8a, 0x2b, 0x68, 0x95, 0x2c, 0x27, 0x2a, 0x29, 0x08, 0x61, 0xe3, 0xdf, 0x5b, 0xb0, 0xa8, 0x2c,
- 0x42, 0xf9, 0xbe, 0x5b, 0xb0, 0x20, 0x7d, 0x8f, 0xf6, 0x7b, 0xc7, 0xb2, 0x7e, 0xef, 0x42, 0xd7,
- 0x1d, 0x31, 0x1a, 0xb6, 0x5b, 0xef, 0x4f, 0x1d, 0xeb, 0xa3, 0xa9, 0xf3, 0xe8, 0x41, 0x42, 0xd3,
- 0xd1, 0x49, 0xfb, 0x4b, 0x4d, 0x18, 0x9d, 0x12, 0xb7, 0x63, 0x91, 0x32, 0xab, 0x43, 0xeb, 0x32,
- 0xa8, 0x5d, 0xf6, 0x7b, 0x34, 0xe2, 0x94, 0x6d, 0x6e, 0x11, 0x44, 0xe2, 0x70, 0x36, 0xef, 0xba,
- 0xa1, 0xef, 0xf9, 0xbd, 0xa8, 0x51, 0x10, 0x3e, 0x3d, 0x9e, 0xe3, 0x9f, 0x58, 0xb0, 0x92, 0x32,
- 0x6b, 0xc5, 0xc4, 0x33, 0x50, 0x8a, 0xb8, 0xa6, 0x34, 0x0f, 0x86, 0x51, 0x6c, 0x0b, 0x78, 0x7b,
- 0x49, 0x5d, 0xbe, 0x24, 0xe7, 0x44, 0xe1, 0x3f, 0xb8, 0xab, 0xfd, 0xc9, 0x82, 0x9a, 0x08, 0x4c,
- 0xfa, 0xad, 0x21, 0xb0, 0x7d, 0x77, 0x48, 0x95, 0xaa, 0xc4, 0xd8, 0x88, 0x56, 0xfc, 0xb8, 0xb2,
- 0x8e, 0x56, 0xf3, 0x3a, 0x58, 0xeb, 0xbe, 0x1d, 0xac, 0x95, 0xbc, 0xbb, 0x3a, 0x14, 0xb9, 0x79,
- 0x4f, 0x84, 0x73, 0xad, 0x10, 0x39, 0xc1, 0x8f, 0xc2, 0xa2, 0xe2, 0x42, 0x89, 0xf6, 0xa0, 0x00,
- 0x3b, 0x84, 0x92, 0xd4, 0x04, 0xfa, 0x3f, 0xa8, 0xc4, 0xa9, 0x8c, 0xe0, 0xb6, 0xd0, 0x2e, 0xed,
- 0x4d, 0x9d, 0x3c, 0x8b, 0x48, 0xb2, 0x80, 0x1c, 0x33, 0xe8, 0x5b, 0xed, 0xca, 0xde, 0xd4, 0x91,
- 0x00, 0x15, 0xe2, 0xd1, 0x09, 0xb0, 0xfb, 0x3c, 0x6e, 0x72, 0x11, 0xd8, 0xed, 0xf2, 0xde, 0xd4,
- 0x11, 0x73, 0x22, 0x3e, 0xf1, 0x25, 0xa8, 0x6d, 0xd1, 0x9e, 0xdb, 0x99, 0xa8, 0x43, 0xeb, 0x9a,
- 0x1c, 0x3f, 0xd0, 0xd2, 0x34, 0x1e, 0x86, 0x5a, 0x7c, 0xe2, 0x5b, 0xc3, 0x48, 0xbd, 0x86, 0x6a,
- 0x0c, 0x7b, 0x25, 0xc2, 0x3f, 0xb5, 0x40, 0xd9, 0x00, 0xc2, 0x46, 0xb6, 0xc3, 0x7d, 0x21, 0xec,
- 0x4d, 0x1d, 0x05, 0xd1, 0xc9, 0x0c, 0x7a, 0x0e, 0x16, 0x22, 0x71, 0x22, 0x27, 0x96, 0x35, 0x2d,
- 0xb1, 0xd0, 0x3e, 0xc4, 0x4d, 0x64, 0x6f, 0xea, 0x68, 0x44, 0xa2, 0x07, 0x68, 0x3d, 0x95, 0x10,
- 0x48, 0xc6, 0x96, 0xf6, 0xa6, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xd5, 0x1b, 0xae,
- 0x17, 0x9b, 0x50, 0x43, 0xab, 0x28, 0xf1, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x4b, 0x07, 0xee, 0xe4,
- 0x62, 0x10, 0x0a, 0xba, 0x8b, 0x24, 0x9e, 0x27, 0x31, 0xdc, 0x9e, 0x19, 0xc3, 0x8b, 0xf3, 0xbb,
- 0xf6, 0xff, 0xad, 0x23, 0xbd, 0x62, 0x97, 0xf3, 0xcb, 0x05, 0xfc, 0x9e, 0x05, 0x35, 0xc9, 0xbc,
- 0xb2, 0xbc, 0xef, 0x42, 0x49, 0xca, 0x46, 0xb0, 0xff, 0x1f, 0x1c, 0xd3, 0xa9, 0x79, 0x9c, 0x92,
- 0xa2, 0x89, 0x5e, 0x80, 0xa5, 0x6e, 0x18, 0x8c, 0x46, 0xb4, 0xbb, 0xad, 0xdc, 0x5f, 0x3e, 0xeb,
- 0xfe, 0x36, 0xcd, 0x75, 0x92, 0x41, 0xc7, 0x7f, 0xb1, 0x60, 0x51, 0x39, 0x13, 0xa5, 0xae, 0x58,
- 0xc4, 0xd6, 0x7d, 0x47, 0xcf, 0xfc, 0xbc, 0xd1, 0xf3, 0x28, 0x94, 0x7a, 0x3c, 0xbe, 0x68, 0x87,
- 0xa4, 0x66, 0xf3, 0x45, 0x55, 0x7c, 0x05, 0x96, 0x34, 0x2b, 0x07, 0x78, 0xd4, 0xd5, 0xac, 0x47,
- 0xbd, 0xdc, 0xa5, 0x3e, 0xf3, 0x76, 0xbc, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x03, 0x0b, 0x96, 0xb3,
- 0x28, 0x68, 0x33, 0x53, 0x58, 0x3c, 0x72, 0x30, 0x39, 0xb3, 0xa6, 0xd0, 0xa4, 0x55, 0x65, 0xf1,
- 0xd4, 0xbd, 0x2a, 0x8b, 0xba, 0xe9, 0x64, 0x2a, 0xca, 0x2b, 0xe0, 0x1f, 0x5b, 0xb0, 0x98, 0xd2,
- 0x25, 0x7a, 0x06, 0xec, 0x9d, 0x30, 0x18, 0xce, 0xa5, 0x28, 0xb1, 0x03, 0x7d, 0x0d, 0xf2, 0x2c,
- 0x98, 0x4b, 0x4d, 0x79, 0x16, 0x70, 0x2d, 0x29, 0xf6, 0x0b, 0x32, 0x6f, 0x97, 0x33, 0xfc, 0x14,
- 0x54, 0x04, 0x43, 0xd7, 0x5d, 0x2f, 0x9c, 0x19, 0x30, 0x66, 0x33, 0xf4, 0x1c, 0x1c, 0x92, 0xce,
- 0x70, 0xf6, 0xe6, 0xda, 0xac, 0xcd, 0x35, 0xbd, 0xf9, 0x38, 0x14, 0x45, 0xd2, 0xc1, 0xb7, 0x74,
- 0x5d, 0xe6, 0xea, 0x2d, 0x7c, 0x8c, 0x8f, 0xc0, 0x0a, 0x7f, 0x83, 0x34, 0x8c, 0x36, 0x82, 0xb1,
- 0xcf, 0x74, 0xdd, 0x74, 0x1a, 0xea, 0x69, 0xb0, 0xb2, 0x92, 0x3a, 0x14, 0x3b, 0x1c, 0x20, 0x68,
- 0x2c, 0x12, 0x39, 0xc1, 0xbf, 0xb0, 0x00, 0x5d, 0xa2, 0x4c, 0x9c, 0x72, 0x79, 0x33, 0x7e, 0x1e,
- 0xab, 0x50, 0x1e, 0xba, 0xac, 0xd3, 0xa7, 0x61, 0xa4, 0xf3, 0x17, 0x3d, 0xff, 0x22, 0x12, 0x4f,
- 0x7c, 0x16, 0x56, 0x52, 0xb7, 0x54, 0x3c, 0xad, 0x42, 0xb9, 0xa3, 0x60, 0x2a, 0xe4, 0xc5, 0x73,
- 0xfc, 0x9b, 0x3c, 0x94, 0x75, 0x5a, 0x87, 0xce, 0x42, 0x75, 0xc7, 0xf3, 0x7b, 0x34, 0x1c, 0x85,
- 0x9e, 0x12, 0x81, 0x2d, 0xd3, 0x3c, 0x03, 0x4c, 0xcc, 0x09, 0x7a, 0x02, 0x16, 0xc6, 0x11, 0x0d,
- 0xdf, 0xf2, 0xe4, 0x4b, 0xaf, 0xb4, 0xeb, 0xbb, 0x53, 0xa7, 0xf4, 0x5a, 0x44, 0xc3, 0xcb, 0x9b,
- 0x3c, 0xf8, 0x8c, 0xc5, 0x88, 0xc8, 0xef, 0x2e, 0x7a, 0x59, 0x99, 0xa9, 0x48, 0xe0, 0xda, 0x5f,
- 0xe7, 0xd7, 0xcf, 0xb8, 0xba, 0x51, 0x18, 0x0c, 0x29, 0xeb, 0xd3, 0x71, 0xd4, 0xea, 0x04, 0xc3,
- 0x61, 0xe0, 0xb7, 0x44, 0xef, 0x40, 0x30, 0xcd, 0x23, 0x28, 0xdf, 0xae, 0x2c, 0xf7, 0x06, 0x2c,
- 0xb0, 0x7e, 0x18, 0x8c, 0x7b, 0x7d, 0x11, 0x18, 0x0a, 0xed, 0xf3, 0xf3, 0xd3, 0xd3, 0x14, 0x88,
- 0x1e, 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xe7, 0x76, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x5d, 0xdc, 0x9b,
- 0x3a, 0xd6, 0x13, 0x24, 0x06, 0xe3, 0x0b, 0xb0, 0x98, 0x4a, 0x85, 0xd1, 0x19, 0xb0, 0x43, 0xba,
- 0xa3, 0x5d, 0x01, 0xda, 0x9f, 0x31, 0xcb, 0xe8, 0xcf, 0x71, 0x88, 0xf8, 0xc4, 0xdf, 0xcf, 0x83,
- 0x63, 0x54, 0xfd, 0x17, 0x83, 0xf0, 0x15, 0xca, 0x42, 0xaf, 0x73, 0xd5, 0x1d, 0x52, 0x6d, 0x5e,
- 0x0e, 0x54, 0x87, 0x02, 0xf8, 0x96, 0xf1, 0x8a, 0x60, 0x18, 0xe3, 0xa1, 0x87, 0x00, 0xc4, 0xb3,
- 0x93, 0xeb, 0xf2, 0x41, 0x55, 0x04, 0x44, 0x2c, 0x6f, 0xa4, 0x84, 0xdd, 0x9a, 0x53, 0x38, 0x4a,
- 0xc8, 0x97, 0xb3, 0x42, 0x9e, 0x9b, 0x4e, 0x2c, 0x59, 0xf3, 0xb9, 0x14, 0xd3, 0xcf, 0x05, 0xff,
- 0xc3, 0x82, 0xe6, 0x96, 0xbe, 0xf9, 0x7d, 0x8a, 0x43, 0xf3, 0x9b, 0x7f, 0x40, 0xfc, 0x16, 0x1e,
- 0x20, 0xbf, 0x76, 0x86, 0xdf, 0x26, 0xc0, 0x96, 0xe7, 0xd3, 0x8b, 0xde, 0x80, 0xd1, 0x70, 0x46,
- 0x91, 0xf4, 0xc3, 0x42, 0xe2, 0x71, 0x08, 0xdd, 0xd1, 0x32, 0xd8, 0x30, 0xdc, 0xfc, 0x83, 0x60,
- 0x31, 0xff, 0x00, 0x59, 0x2c, 0x64, 0x3c, 0xa0, 0x0f, 0x0b, 0x3b, 0x82, 0x3d, 0x19, 0xb1, 0x53,
- 0xfd, 0xa7, 0x84, 0xf7, 0xf6, 0x37, 0xd5, 0xe1, 0x4f, 0xdf, 0x23, 0xe1, 0x12, 0x7d, 0xc4, 0x56,
- 0x34, 0xf1, 0x99, 0xfb, 0x8e, 0xb1, 0x9f, 0xe8, 0x43, 0x90, 0xab, 0x72, 0xba, 0xe2, 0xcc, 0x9c,
- 0xee, 0x79, 0x75, 0xcc, 0x7f, 0x93, 0xd7, 0xe1, 0xe7, 0x13, 0x07, 0x2b, 0x94, 0xa2, 0x1c, 0xec,
- 0x23, 0xf7, 0x7a, 0xfe, 0xea, 0xd1, 0xff, 0xc1, 0x82, 0xe5, 0x4b, 0x94, 0xa5, 0x73, 0xac, 0xaf,
- 0x90, 0x4a, 0xf1, 0x4b, 0x70, 0xd8, 0xb8, 0xbf, 0xe2, 0xfe, 0xc9, 0x4c, 0x62, 0x75, 0x24, 0xe1,
- 0xff, 0xb2, 0xdf, 0xa5, 0xef, 0xa8, 0x7a, 0x35, 0x9d, 0x53, 0x5d, 0x87, 0xaa, 0xb1, 0x88, 0x2e,
- 0x64, 0xb2, 0xa9, 0x95, 0x4c, 0x9b, 0x96, 0x67, 0x04, 0xed, 0xba, 0xe2, 0x49, 0x56, 0xa5, 0x2a,
- 0x57, 0x8e, 0x33, 0x8f, 0x6d, 0x40, 0x42, 0x5d, 0x82, 0xac, 0x19, 0xfb, 0x04, 0xf4, 0xe5, 0x38,
- 0xad, 0x8a, 0xe7, 0xe8, 0x61, 0xb0, 0xc3, 0xe0, 0xae, 0x4e, 0x93, 0x17, 0x93, 0x23, 0x49, 0x70,
- 0x97, 0x88, 0x25, 0xfc, 0x1c, 0x14, 0x48, 0x70, 0x17, 0x35, 0x01, 0x42, 0xd7, 0xef, 0xd1, 0x9b,
- 0x71, 0x81, 0x56, 0x23, 0x06, 0xe4, 0x80, 0xbc, 0x64, 0x03, 0x0e, 0x9b, 0x37, 0x92, 0xea, 0x5e,
- 0x87, 0x85, 0x57, 0xc7, 0xa6, 0xb8, 0xea, 0x19, 0x71, 0xc9, 0x3e, 0x80, 0x46, 0xe2, 0x36, 0x03,
- 0x09, 0x1c, 0x9d, 0x80, 0x0a, 0x73, 0x6f, 0x0d, 0xe8, 0xd5, 0xc4, 0x05, 0x26, 0x00, 0xbe, 0xca,
- 0x6b, 0xcb, 0x9b, 0x46, 0x82, 0x95, 0x00, 0xd0, 0xe3, 0xb0, 0x9c, 0xdc, 0xf9, 0x7a, 0x48, 0x77,
- 0xbc, 0x77, 0x84, 0x86, 0x6b, 0x64, 0x1f, 0x1c, 0x9d, 0x84, 0x43, 0x09, 0x6c, 0x5b, 0x24, 0x32,
- 0xb6, 0x40, 0xcd, 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xde, 0x19, 0xbb, 0x03, 0xf1, 0xf8, 0x6a,
- 0xc4, 0x80, 0xe0, 0x3f, 0x5a, 0x70, 0x58, 0xaa, 0x9a, 0xb9, 0xec, 0x2b, 0x69, 0xf5, 0xbf, 0xb4,
- 0x00, 0x99, 0x1c, 0x28, 0xd3, 0xfa, 0x7f, 0xb3, 0xcf, 0xc4, 0x33, 0xa5, 0xaa, 0x28, 0x99, 0x25,
- 0x28, 0x69, 0x15, 0x61, 0x28, 0x75, 0x64, 0x3f, 0x4d, 0x34, 0xc6, 0x65, 0x4d, 0x2e, 0x21, 0x44,
- 0x7d, 0x23, 0x07, 0x8a, 0xb7, 0x26, 0x8c, 0x46, 0xaa, 0xa2, 0x16, 0xad, 0x04, 0x01, 0x20, 0xf2,
- 0x8b, 0x9f, 0x45, 0x7d, 0x26, 0xac, 0xc6, 0x4e, 0xce, 0x52, 0x20, 0xa2, 0x07, 0xf8, 0x5f, 0x79,
- 0x58, 0xbc, 0x19, 0x0c, 0xc6, 0x49, 0xd0, 0xfc, 0x2a, 0x05, 0x8c, 0x54, 0x99, 0x5f, 0xd4, 0x65,
- 0x3e, 0x02, 0x3b, 0x62, 0x74, 0x24, 0x2c, 0xab, 0x40, 0xc4, 0x18, 0x61, 0xa8, 0x31, 0x37, 0xec,
- 0x51, 0x26, 0x8b, 0xa7, 0x46, 0x49, 0x64, 0xb5, 0x29, 0x18, 0x5a, 0x83, 0xaa, 0xdb, 0xeb, 0x85,
- 0xb4, 0xe7, 0x32, 0xda, 0x9e, 0x34, 0x16, 0xc4, 0x61, 0x26, 0x08, 0x5d, 0x81, 0xa5, 0x8e, 0xdb,
- 0xe9, 0x7b, 0x7e, 0xef, 0xda, 0x88, 0x79, 0x81, 0x1f, 0x35, 0xca, 0x22, 0x74, 0x9c, 0x58, 0x37,
- 0x7f, 0x68, 0x5a, 0xdf, 0x48, 0xe1, 0x28, 0x3f, 0x96, 0xd9, 0x89, 0xdf, 0x80, 0x25, 0x2d, 0x78,
- 0x65, 0x1e, 0x67, 0x60, 0xe1, 0x6d, 0x01, 0x99, 0xd1, 0xc2, 0x93, 0xa8, 0x8a, 0x94, 0x46, 0x4b,
- 0xff, 0x54, 0xa1, 0xf9, 0xc7, 0x57, 0xa0, 0x24, 0xd1, 0xd1, 0x09, 0xb3, 0x9c, 0x92, 0x19, 0x25,
- 0x9f, 0xab, 0xda, 0x08, 0x43, 0x49, 0x12, 0x52, 0x46, 0x24, 0xec, 0x4c, 0x42, 0x88, 0xfa, 0xc6,
- 0x3f, 0xca, 0xc3, 0x91, 0x4d, 0xca, 0x68, 0x87, 0xd1, 0xee, 0x45, 0x8f, 0x0e, 0xba, 0x5f, 0x68,
- 0xa5, 0x1f, 0xf7, 0xeb, 0x0a, 0x46, 0xbf, 0x8e, 0xfb, 0xb0, 0x81, 0xe7, 0xd3, 0x2d, 0xa3, 0xe1,
- 0x93, 0x00, 0x12, 0x19, 0x15, 0xcd, 0x56, 0x90, 0xb6, 0x91, 0x92, 0x61, 0x23, 0x49, 0x9b, 0x6f,
- 0x21, 0xd5, 0x99, 0xd4, 0x75, 0x65, 0x39, 0x29, 0x4a, 0xf1, 0xef, 0x2c, 0x38, 0x9a, 0x95, 0x8b,
- 0x52, 0xe3, 0x8b, 0x50, 0xda, 0x11, 0x90, 0xfd, 0xcd, 0xe4, 0xd4, 0x0e, 0xd9, 0x8f, 0x90, 0xa8,
- 0x66, 0x3f, 0x42, 0x42, 0xd0, 0x63, 0xa9, 0x9f, 0xa1, 0xda, 0x2b, 0x7b, 0x53, 0xe7, 0x90, 0x00,
- 0x18, 0xb8, 0x8a, 0x99, 0xd3, 0xf1, 0xc5, 0x0b, 0x49, 0xa3, 0x43, 0x42, 0x4c, 0xc2, 0xaa, 0x6b,
- 0xf9, 0x67, 0x0b, 0x16, 0x53, 0x17, 0x11, 0x22, 0xe2, 0x4f, 0x40, 0x85, 0x07, 0x39, 0x41, 0x8f,
- 0x81, 0xcd, 0x26, 0x23, 0x15, 0x15, 0xda, 0x47, 0x3e, 0x9b, 0x3a, 0x87, 0x53, 0xdb, 0x6e, 0x4c,
- 0x46, 0x94, 0x08, 0x14, 0xfe, 0x72, 0x3a, 0x6e, 0xd8, 0xf5, 0x7c, 0x77, 0xe0, 0x31, 0xa9, 0x1d,
- 0x9b, 0x98, 0x20, 0xee, 0x8e, 0x46, 0x6e, 0x18, 0xe9, 0xd4, 0xae, 0x22, 0xdd, 0x91, 0x02, 0x11,
- 0x3d, 0x10, 0x2d, 0x9b, 0xdb, 0x94, 0x75, 0xfa, 0x32, 0x2c, 0xa8, 0x96, 0x8d, 0x80, 0xa4, 0x5a,
- 0x36, 0x02, 0x82, 0x7f, 0x6e, 0x25, 0xc6, 0x29, 0xdf, 0xf0, 0x97, 0xce, 0x38, 0xf1, 0xb7, 0x13,
- 0x3b, 0xd1, 0x57, 0x54, 0x76, 0xf2, 0x02, 0x2c, 0x75, 0x53, 0x2b, 0x07, 0xdb, 0x8b, 0x6c, 0x47,
- 0x67, 0xd0, 0xf1, 0x38, 0xd1, 0xa3, 0x80, 0x1c, 0xa0, 0xc7, 0x8c, 0x72, 0xf2, 0xfb, 0x95, 0x93,
- 0x48, 0xbd, 0x70, 0x6f, 0xa9, 0x3f, 0xfe, 0x08, 0x54, 0xe2, 0x9f, 0x1e, 0x51, 0x15, 0x16, 0x2e,
- 0x5e, 0x23, 0xaf, 0x5f, 0x20, 0x9b, 0xcb, 0x39, 0x54, 0x83, 0x72, 0xfb, 0xc2, 0xc6, 0xcb, 0x62,
- 0x66, 0x9d, 0xfb, 0x75, 0x49, 0x27, 0x2e, 0x21, 0xfa, 0x06, 0x14, 0x65, 0x36, 0x72, 0x34, 0x61,
- 0xce, 0xfc, 0x55, 0x6e, 0xf5, 0xd8, 0x3e, 0xb8, 0x94, 0x12, 0xce, 0x9d, 0xb1, 0xd0, 0x55, 0xa8,
- 0x0a, 0xa0, 0xea, 0x7b, 0x9f, 0xc8, 0xb6, 0x9f, 0x53, 0x94, 0x1e, 0x3a, 0x60, 0xd5, 0xa0, 0x77,
- 0x1e, 0x8a, 0x52, 0x60, 0x47, 0x33, 0x49, 0xe3, 0x8c, 0xdb, 0xa4, 0x7e, 0x09, 0xc0, 0x39, 0xf4,
- 0x2c, 0xd8, 0x37, 0x5c, 0x6f, 0x80, 0x8c, 0x9c, 0xd5, 0x68, 0x57, 0xaf, 0x1e, 0xcd, 0x82, 0x8d,
- 0x63, 0x9f, 0x8f, 0xbb, 0xee, 0xc7, 0xb2, 0xad, 0x3f, 0xbd, 0xbd, 0xb1, 0x7f, 0x21, 0x3e, 0xf9,
- 0x9a, 0xec, 0x0d, 0xeb, 0x06, 0x14, 0x7a, 0x28, 0x7d, 0x54, 0xa6, 0x5f, 0xb5, 0xda, 0x3c, 0x68,
- 0x39, 0x26, 0xb8, 0x05, 0x55, 0xa3, 0xf9, 0x63, 0x8a, 0x75, 0x7f, 0xe7, 0xca, 0x14, 0xeb, 0x8c,
- 0x8e, 0x11, 0xce, 0xa1, 0x4b, 0x50, 0xe6, 0x99, 0xbe, 0xf8, 0x91, 0xe8, 0x78, 0x36, 0xa1, 0x37,
- 0x12, 0xb9, 0xd5, 0x13, 0xb3, 0x17, 0x63, 0x42, 0xdf, 0x82, 0xca, 0x25, 0xca, 0x54, 0x04, 0x3b,
- 0x96, 0x0d, 0x81, 0x33, 0x24, 0x95, 0x0e, 0xa3, 0x38, 0x87, 0xde, 0x10, 0x45, 0x47, 0xda, 0x3d,
- 0x23, 0xe7, 0x00, 0x37, 0x1c, 0xdf, 0x6b, 0xed, 0x60, 0x84, 0x98, 0xf2, 0xeb, 0x29, 0xca, 0x2a,
- 0x6f, 0x70, 0x0e, 0x78, 0xb0, 0x31, 0x65, 0xe7, 0x1e, 0x7f, 0x21, 0xc1, 0xb9, 0x73, 0x6f, 0xea,
- 0x7f, 0x51, 0x6c, 0xba, 0xcc, 0x45, 0xd7, 0x60, 0x49, 0xc8, 0x32, 0xfe, 0x9b, 0x45, 0xca, 0xe6,
- 0xf7, 0xfd, 0xa7, 0x23, 0x65, 0xf3, 0xfb, 0xff, 0xdb, 0x81, 0x73, 0xed, 0x37, 0x3f, 0xf8, 0xb8,
- 0x99, 0xfb, 0xf0, 0xe3, 0x66, 0xee, 0xd3, 0x8f, 0x9b, 0xd6, 0xf7, 0x76, 0x9b, 0xd6, 0xaf, 0x76,
- 0x9b, 0xd6, 0xfb, 0xbb, 0x4d, 0xeb, 0x83, 0xdd, 0xa6, 0xf5, 0xb7, 0xdd, 0xa6, 0xf5, 0xf7, 0xdd,
- 0x66, 0xee, 0xd3, 0xdd, 0xa6, 0xf5, 0xee, 0x27, 0xcd, 0xdc, 0x07, 0x9f, 0x34, 0x73, 0x1f, 0x7e,
- 0xd2, 0xcc, 0x7d, 0xe7, 0xd1, 0x7b, 0x17, 0xd8, 0xd2, 0x2d, 0x96, 0xc4, 0xd7, 0x93, 0xff, 0x0e,
- 0x00, 0x00, 0xff, 0xff, 0xc7, 0x66, 0x64, 0x64, 0x1d, 0x24, 0x00, 0x00,
+ 0xf5, 0x5c, 0x7e, 0xf3, 0x91, 0x92, 0xe5, 0x11, 0x6d, 0x13, 0xb2, 0xc3, 0x55, 0x06, 0xbf, 0x5f,
+ 0xa2, 0xc4, 0x8e, 0x68, 0x3b, 0x4d, 0x9a, 0x38, 0x4d, 0x53, 0x53, 0x8a, 0x1d, 0x3b, 0x8a, 0xed,
+ 0x8c, 0x1c, 0x27, 0x2d, 0x1a, 0x04, 0x6b, 0x72, 0x44, 0x2e, 0x4c, 0xee, 0xd2, 0xbb, 0xc3, 0x38,
+ 0xbc, 0xf5, 0x1f, 0x28, 0x1a, 0xa0, 0x28, 0xda, 0x5e, 0x0a, 0x14, 0x28, 0xd0, 0xa2, 0x40, 0x2e,
+ 0x45, 0x0f, 0x3d, 0x14, 0xed, 0xa5, 0x40, 0xd3, 0x5b, 0x8e, 0x41, 0x0e, 0x6c, 0xa3, 0x5c, 0x0a,
+ 0x01, 0x05, 0x72, 0x6a, 0x81, 0x9c, 0x8a, 0xf9, 0xda, 0x9d, 0x5d, 0x51, 0x55, 0xe8, 0xba, 0x48,
+ 0x72, 0x21, 0x67, 0xde, 0xbc, 0x79, 0x33, 0xef, 0x63, 0xde, 0x17, 0x09, 0x27, 0x47, 0x77, 0x7a,
+ 0xad, 0x81, 0xdf, 0x1b, 0x05, 0x3e, 0xf3, 0xa3, 0xc1, 0xba, 0xf8, 0x44, 0x65, 0x3d, 0x5f, 0xa9,
+ 0xf7, 0xfc, 0x9e, 0x2f, 0x71, 0xf8, 0x48, 0xae, 0xaf, 0xd8, 0x3d, 0xdf, 0xef, 0x0d, 0x68, 0x4b,
+ 0xcc, 0x6e, 0x8f, 0x77, 0x5a, 0xcc, 0x1d, 0xd2, 0x90, 0x39, 0xc3, 0x91, 0x42, 0x58, 0x55, 0xd4,
+ 0xef, 0x0e, 0x86, 0x7e, 0x97, 0x0e, 0x5a, 0x21, 0x73, 0x58, 0x28, 0x3f, 0x15, 0xc6, 0x32, 0xc7,
+ 0x18, 0x8d, 0xc3, 0xbe, 0xf8, 0x50, 0xc0, 0xb3, 0x1c, 0x18, 0x32, 0x3f, 0x70, 0x7a, 0xb4, 0xd5,
+ 0xe9, 0x8f, 0xbd, 0x3b, 0xad, 0x8e, 0xd3, 0xe9, 0xd3, 0x56, 0x40, 0xc3, 0xf1, 0x80, 0x85, 0x72,
+ 0xc2, 0x26, 0x23, 0xaa, 0xc8, 0xe0, 0xdf, 0x5a, 0x70, 0x6c, 0xcb, 0xb9, 0x4d, 0x07, 0x37, 0xfd,
+ 0x5b, 0xce, 0x60, 0x4c, 0x43, 0x42, 0xc3, 0x91, 0xef, 0x85, 0x14, 0x6d, 0x40, 0x71, 0xc0, 0x17,
+ 0xc2, 0x86, 0xb5, 0x9a, 0x5b, 0xab, 0x9e, 0x3f, 0xbd, 0x1e, 0x31, 0x39, 0x73, 0x83, 0x84, 0x86,
+ 0x2f, 0x7a, 0x2c, 0x98, 0x10, 0xb5, 0x75, 0xe5, 0x16, 0x54, 0x0d, 0x30, 0x5a, 0x82, 0xdc, 0x1d,
+ 0x3a, 0x69, 0x58, 0xab, 0xd6, 0x5a, 0x85, 0xf0, 0x21, 0x3a, 0x07, 0x85, 0xb7, 0x39, 0x99, 0x46,
+ 0x76, 0xd5, 0x5a, 0xab, 0x9e, 0x3f, 0x19, 0x1f, 0xf2, 0x9a, 0xe7, 0xde, 0x1d, 0x53, 0xb1, 0x5b,
+ 0x1d, 0x24, 0x31, 0x2f, 0x64, 0x9f, 0xb1, 0xf0, 0x69, 0x38, 0xba, 0x6f, 0x1d, 0x1d, 0x87, 0xa2,
+ 0xc0, 0x90, 0x37, 0xae, 0x10, 0x35, 0xc3, 0x75, 0x40, 0xdb, 0x2c, 0xa0, 0xce, 0x90, 0x38, 0x8c,
+ 0xdf, 0xf7, 0xee, 0x98, 0x86, 0x0c, 0xbf, 0x02, 0xcb, 0x09, 0xa8, 0x62, 0xfb, 0x69, 0xa8, 0x86,
+ 0x31, 0x58, 0xf1, 0x5e, 0x8f, 0xaf, 0x15, 0xef, 0x21, 0x26, 0x22, 0xfe, 0x99, 0x05, 0x10, 0xaf,
+ 0xa1, 0x26, 0x80, 0x5c, 0x7d, 0xc9, 0x09, 0xfb, 0x82, 0xe1, 0x3c, 0x31, 0x20, 0xe8, 0x0c, 0x1c,
+ 0x8d, 0x67, 0xd7, 0xfc, 0xed, 0xbe, 0x13, 0x74, 0x85, 0x0c, 0xf2, 0x64, 0xff, 0x02, 0x42, 0x90,
+ 0x0f, 0x1c, 0x46, 0x1b, 0xb9, 0x55, 0x6b, 0x2d, 0x47, 0xc4, 0x98, 0x73, 0xcb, 0xa8, 0xe7, 0x78,
+ 0xac, 0x91, 0x17, 0xe2, 0x54, 0x33, 0x0e, 0xe7, 0x16, 0x41, 0xc3, 0x46, 0x61, 0xd5, 0x5a, 0x5b,
+ 0x20, 0x6a, 0x86, 0xff, 0x99, 0x83, 0xda, 0xab, 0x63, 0x1a, 0x4c, 0x94, 0x00, 0x50, 0x13, 0xca,
+ 0x21, 0x1d, 0xd0, 0x0e, 0xf3, 0x03, 0xa9, 0x91, 0x76, 0xb6, 0x61, 0x91, 0x08, 0x86, 0xea, 0x50,
+ 0x18, 0xb8, 0x43, 0x97, 0x89, 0x6b, 0x2d, 0x10, 0x39, 0x41, 0x17, 0xa0, 0x10, 0x32, 0x27, 0x60,
+ 0xe2, 0x2e, 0xd5, 0xf3, 0x2b, 0xeb, 0xd2, 0x94, 0xd7, 0xb5, 0x29, 0xaf, 0xdf, 0xd4, 0xa6, 0xdc,
+ 0x2e, 0xbf, 0x3f, 0xb5, 0x33, 0xef, 0xfe, 0xd5, 0xb6, 0x88, 0xdc, 0x82, 0x9e, 0x86, 0x1c, 0xf5,
+ 0xba, 0xe2, 0xbe, 0x9f, 0x77, 0x27, 0xdf, 0x80, 0xce, 0x41, 0xa5, 0xeb, 0x06, 0xb4, 0xc3, 0x5c,
+ 0xdf, 0x13, 0x5c, 0x2d, 0x9e, 0x5f, 0x8e, 0x35, 0xb2, 0xa9, 0x97, 0x48, 0x8c, 0x85, 0xce, 0x40,
+ 0x31, 0xe4, 0xa2, 0x0b, 0x1b, 0x25, 0x6e, 0x0b, 0xed, 0xfa, 0xde, 0xd4, 0x5e, 0x92, 0x90, 0x33,
+ 0xfe, 0xd0, 0x65, 0x74, 0x38, 0x62, 0x13, 0xa2, 0x70, 0xd0, 0xe3, 0x50, 0xea, 0xd2, 0x01, 0xe5,
+ 0x0a, 0x2f, 0x0b, 0x85, 0x2f, 0x19, 0xe4, 0xc5, 0x02, 0xd1, 0x08, 0xe8, 0x4d, 0xc8, 0x8f, 0x06,
+ 0x8e, 0xd7, 0xa8, 0x08, 0x2e, 0x16, 0x63, 0xc4, 0x1b, 0x03, 0xc7, 0x6b, 0x3f, 0xfb, 0xd1, 0xd4,
+ 0x7e, 0xaa, 0xe7, 0xb2, 0xfe, 0xf8, 0xf6, 0x7a, 0xc7, 0x1f, 0xb6, 0x7a, 0x81, 0xb3, 0xe3, 0x78,
+ 0x4e, 0x6b, 0xe0, 0xdf, 0x71, 0x5b, 0x6f, 0x3f, 0xd9, 0xe2, 0x0f, 0xf4, 0xee, 0x98, 0x06, 0x2e,
+ 0x0d, 0x5a, 0x9c, 0xcc, 0xba, 0x50, 0x09, 0xdf, 0x4a, 0x04, 0x59, 0x74, 0x95, 0xdb, 0x9f, 0x1f,
+ 0xd0, 0x0d, 0xfe, 0x7a, 0xc3, 0x06, 0x88, 0x53, 0x4e, 0xc4, 0xa7, 0x08, 0x38, 0xa1, 0x3b, 0x97,
+ 0x03, 0x7f, 0x3c, 0x6a, 0x1f, 0xd9, 0x9b, 0xda, 0x26, 0x3e, 0x31, 0x27, 0x57, 0xf3, 0xe5, 0xe2,
+ 0x52, 0x09, 0xbf, 0x97, 0x03, 0xb4, 0xed, 0x0c, 0x47, 0x03, 0x3a, 0x97, 0xfa, 0x23, 0x45, 0x67,
+ 0xef, 0x5b, 0xd1, 0xb9, 0x79, 0x15, 0x1d, 0x6b, 0x2d, 0x3f, 0x9f, 0xd6, 0x0a, 0x9f, 0x57, 0x6b,
+ 0xc5, 0x2f, 0xbd, 0xd6, 0x70, 0x03, 0xf2, 0x9c, 0x32, 0x77, 0x96, 0x81, 0x73, 0x4f, 0xe8, 0xa6,
+ 0x46, 0xf8, 0x10, 0x6f, 0x41, 0x51, 0xf2, 0x85, 0x56, 0xd2, 0xca, 0x4b, 0xbe, 0xdb, 0x58, 0x71,
+ 0x39, 0xad, 0x92, 0xa5, 0x58, 0x25, 0x39, 0x21, 0x6c, 0xfc, 0x7b, 0x0b, 0x16, 0x94, 0x45, 0x28,
+ 0xdf, 0x77, 0x1b, 0x4a, 0xd2, 0xf7, 0x68, 0xbf, 0x77, 0x22, 0xed, 0xf7, 0x2e, 0x76, 0x9d, 0x11,
+ 0xa3, 0x41, 0xbb, 0xf5, 0xfe, 0xd4, 0xb6, 0x3e, 0x9a, 0xda, 0x8f, 0x1e, 0x24, 0x34, 0x1d, 0x9d,
+ 0xb4, 0xbf, 0xd4, 0x84, 0xd1, 0x69, 0x71, 0x3b, 0x16, 0x2a, 0xb3, 0x3a, 0xb2, 0x2e, 0x83, 0xda,
+ 0x15, 0xaf, 0x47, 0x43, 0x4e, 0x39, 0xcf, 0x2d, 0x82, 0x48, 0x1c, 0xce, 0xe6, 0x3d, 0x27, 0xf0,
+ 0x5c, 0xaf, 0x17, 0x36, 0x72, 0xc2, 0xa7, 0x47, 0x73, 0xfc, 0x13, 0x0b, 0x96, 0x13, 0x66, 0xad,
+ 0x98, 0x78, 0x06, 0x8a, 0x21, 0xd7, 0x94, 0xe6, 0xc1, 0x30, 0x8a, 0x6d, 0x01, 0x6f, 0x2f, 0xaa,
+ 0xcb, 0x17, 0xe5, 0x9c, 0x28, 0xfc, 0x07, 0x77, 0xb5, 0x3f, 0x59, 0x50, 0x13, 0x81, 0x49, 0xbf,
+ 0x35, 0x04, 0x79, 0xcf, 0x19, 0x52, 0xa5, 0x2a, 0x31, 0x36, 0xa2, 0x15, 0x3f, 0xae, 0xac, 0xa3,
+ 0xd5, 0xbc, 0x0e, 0xd6, 0xba, 0x6f, 0x07, 0x6b, 0xc5, 0xef, 0xae, 0x0e, 0x05, 0x6e, 0xde, 0x13,
+ 0xe1, 0x5c, 0x2b, 0x44, 0x4e, 0xf0, 0xa3, 0xb0, 0xa0, 0xb8, 0x50, 0xa2, 0x3d, 0x28, 0xc0, 0x0e,
+ 0xa1, 0x28, 0x35, 0x81, 0xfe, 0x0f, 0x2a, 0x51, 0x2a, 0x23, 0xb8, 0xcd, 0xb5, 0x8b, 0x7b, 0x53,
+ 0x3b, 0xcb, 0x42, 0x12, 0x2f, 0x20, 0xdb, 0x0c, 0xfa, 0x56, 0xbb, 0xb2, 0x37, 0xb5, 0x25, 0x40,
+ 0x85, 0x78, 0x74, 0x0a, 0xf2, 0x7d, 0x1e, 0x37, 0xb9, 0x08, 0xf2, 0xed, 0xf2, 0xde, 0xd4, 0x16,
+ 0x73, 0x22, 0x3e, 0xf1, 0x65, 0xa8, 0x6d, 0xd1, 0x9e, 0xd3, 0x99, 0xa8, 0x43, 0xeb, 0x9a, 0x1c,
+ 0x3f, 0xd0, 0xd2, 0x34, 0x1e, 0x86, 0x5a, 0x74, 0xe2, 0x5b, 0xc3, 0x50, 0xbd, 0x86, 0x6a, 0x04,
+ 0x7b, 0x25, 0xc4, 0x3f, 0xb5, 0x40, 0xd9, 0x00, 0xc2, 0x46, 0xb6, 0xc3, 0x7d, 0x21, 0xec, 0x4d,
+ 0x6d, 0x05, 0xd1, 0xc9, 0x0c, 0x7a, 0x0e, 0x4a, 0xa1, 0x38, 0x91, 0x13, 0x4b, 0x9b, 0x96, 0x58,
+ 0x68, 0x1f, 0xe1, 0x26, 0xb2, 0x37, 0xb5, 0x35, 0x22, 0xd1, 0x03, 0xb4, 0x9e, 0x48, 0x08, 0x24,
+ 0x63, 0x8b, 0x7b, 0x53, 0xdb, 0x80, 0x9a, 0x09, 0x02, 0xfe, 0xcc, 0x82, 0xea, 0x4d, 0xc7, 0x8d,
+ 0x4c, 0xa8, 0xa1, 0x55, 0x14, 0xfb, 0x6a, 0x09, 0xe0, 0x96, 0xd8, 0xa5, 0x03, 0x67, 0x72, 0xc9,
+ 0x0f, 0x04, 0xdd, 0x05, 0x12, 0xcd, 0xe3, 0x18, 0x9e, 0x9f, 0x19, 0xc3, 0x0b, 0xf3, 0xbb, 0xf6,
+ 0xff, 0xad, 0x23, 0xbd, 0x9a, 0x2f, 0x67, 0x97, 0x72, 0xf8, 0x3d, 0x0b, 0x6a, 0x92, 0x79, 0x65,
+ 0x79, 0xdf, 0x85, 0xa2, 0x94, 0x8d, 0x60, 0xff, 0x3f, 0x38, 0xa6, 0xd3, 0xf3, 0x38, 0x25, 0x45,
+ 0x13, 0xbd, 0x00, 0x8b, 0xdd, 0xc0, 0x1f, 0x8d, 0x68, 0x77, 0x5b, 0xb9, 0xbf, 0x6c, 0xda, 0xfd,
+ 0x6d, 0x9a, 0xeb, 0x24, 0x85, 0x8e, 0xff, 0x62, 0xc1, 0x82, 0x72, 0x26, 0x4a, 0x5d, 0x91, 0x88,
+ 0xad, 0xfb, 0x8e, 0x9e, 0xd9, 0x79, 0xa3, 0xe7, 0x71, 0x28, 0xf6, 0x78, 0x7c, 0xd1, 0x0e, 0x49,
+ 0xcd, 0xe6, 0x8b, 0xaa, 0xf8, 0x2a, 0x2c, 0x6a, 0x56, 0x0e, 0xf0, 0xa8, 0x2b, 0x69, 0x8f, 0x7a,
+ 0xa5, 0x4b, 0x3d, 0xe6, 0xee, 0xb8, 0x91, 0x8f, 0x54, 0xf8, 0xf8, 0x07, 0x16, 0x2c, 0xa5, 0x51,
+ 0xd0, 0x66, 0xaa, 0xb0, 0x78, 0xe4, 0x60, 0x72, 0x66, 0x4d, 0xa1, 0x49, 0xab, 0xca, 0xe2, 0xa9,
+ 0xc3, 0x2a, 0x8b, 0xba, 0xe9, 0x64, 0x2a, 0xca, 0x2b, 0xe0, 0x1f, 0x5b, 0xb0, 0x90, 0xd0, 0x25,
+ 0x7a, 0x06, 0xf2, 0x3b, 0x81, 0x3f, 0x9c, 0x4b, 0x51, 0x62, 0x07, 0xfa, 0x1a, 0x64, 0x99, 0x3f,
+ 0x97, 0x9a, 0xb2, 0xcc, 0xe7, 0x5a, 0x52, 0xec, 0xe7, 0x64, 0xde, 0x2e, 0x67, 0xf8, 0x29, 0xa8,
+ 0x08, 0x86, 0x6e, 0x38, 0x6e, 0x30, 0x33, 0x60, 0xcc, 0x66, 0xe8, 0x39, 0x38, 0x22, 0x9d, 0xe1,
+ 0xec, 0xcd, 0xb5, 0x59, 0x9b, 0x6b, 0x7a, 0xf3, 0x49, 0x28, 0x88, 0xa4, 0x83, 0x6f, 0xe9, 0x3a,
+ 0xcc, 0xd1, 0x5b, 0xf8, 0x18, 0x1f, 0x83, 0x65, 0xfe, 0x06, 0x69, 0x10, 0x6e, 0xf8, 0x63, 0x8f,
+ 0xe9, 0xba, 0xe9, 0x0c, 0xd4, 0x93, 0x60, 0x65, 0x25, 0x75, 0x28, 0x74, 0x38, 0x40, 0xd0, 0x58,
+ 0x20, 0x72, 0x82, 0x7f, 0x61, 0x01, 0xba, 0x4c, 0x99, 0x38, 0xe5, 0xca, 0x66, 0xf4, 0x3c, 0x56,
+ 0xa0, 0x3c, 0x74, 0x58, 0xa7, 0x4f, 0x83, 0x50, 0xe7, 0x2f, 0x7a, 0xfe, 0x45, 0x24, 0x9e, 0xf8,
+ 0x1c, 0x2c, 0x27, 0x6e, 0xa9, 0x78, 0x5a, 0x81, 0x72, 0x47, 0xc1, 0x54, 0xc8, 0x8b, 0xe6, 0xf8,
+ 0x37, 0x59, 0x28, 0xeb, 0xb4, 0x0e, 0x9d, 0x83, 0xea, 0x8e, 0xeb, 0xf5, 0x68, 0x30, 0x0a, 0x5c,
+ 0x25, 0x82, 0xbc, 0x4c, 0xf3, 0x0c, 0x30, 0x31, 0x27, 0xe8, 0x09, 0x28, 0x8d, 0x43, 0x1a, 0xbc,
+ 0xe5, 0xca, 0x97, 0x5e, 0x69, 0xd7, 0x77, 0xa7, 0x76, 0xf1, 0xb5, 0x90, 0x06, 0x57, 0x36, 0x79,
+ 0xf0, 0x19, 0x8b, 0x11, 0x91, 0xdf, 0x5d, 0xf4, 0xb2, 0x32, 0x53, 0x91, 0xc0, 0xb5, 0xbf, 0xce,
+ 0xaf, 0x9f, 0x72, 0x75, 0xa3, 0xc0, 0x1f, 0x52, 0xd6, 0xa7, 0xe3, 0xb0, 0xd5, 0xf1, 0x87, 0x43,
+ 0xdf, 0x6b, 0x89, 0xde, 0x81, 0x60, 0x9a, 0x47, 0x50, 0xbe, 0x5d, 0x59, 0xee, 0x4d, 0x28, 0xb1,
+ 0x7e, 0xe0, 0x8f, 0x7b, 0x7d, 0x11, 0x18, 0x72, 0xed, 0x0b, 0xf3, 0xd3, 0xd3, 0x14, 0x88, 0x1e,
+ 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xe7, 0x4e, 0x38, 0x1e, 0xca, 0xda, 0xb3, 0x5d, 0xd8, 0x9b, 0xda,
+ 0xd6, 0x13, 0x24, 0x02, 0xe3, 0x8b, 0xb0, 0x90, 0x48, 0x85, 0xd1, 0x59, 0xc8, 0x07, 0x74, 0x47,
+ 0xbb, 0x02, 0xb4, 0x3f, 0x63, 0x96, 0xd1, 0x9f, 0xe3, 0x10, 0xf1, 0x89, 0xbf, 0x9f, 0x05, 0xdb,
+ 0xa8, 0xfa, 0x2f, 0xf9, 0xc1, 0x2b, 0x94, 0x05, 0x6e, 0xe7, 0x9a, 0x33, 0xa4, 0xda, 0xbc, 0x6c,
+ 0xa8, 0x0e, 0x05, 0xf0, 0x2d, 0xe3, 0x15, 0xc1, 0x30, 0xc2, 0x43, 0x0f, 0x01, 0x88, 0x67, 0x27,
+ 0xd7, 0xe5, 0x83, 0xaa, 0x08, 0x88, 0x58, 0xde, 0x48, 0x08, 0xbb, 0x35, 0xa7, 0x70, 0x94, 0x90,
+ 0xaf, 0xa4, 0x85, 0x3c, 0x37, 0x9d, 0x48, 0xb2, 0xe6, 0x73, 0x29, 0x24, 0x9f, 0x0b, 0xfe, 0x87,
+ 0x05, 0xcd, 0x2d, 0x7d, 0xf3, 0xfb, 0x14, 0x87, 0xe6, 0x37, 0xfb, 0x80, 0xf8, 0xcd, 0x3d, 0x40,
+ 0x7e, 0xf3, 0x29, 0x7e, 0x9b, 0x00, 0x5b, 0xae, 0x47, 0x2f, 0xb9, 0x03, 0x46, 0x83, 0x19, 0x45,
+ 0xd2, 0x0f, 0x73, 0xb1, 0xc7, 0x21, 0x74, 0x47, 0xcb, 0x60, 0xc3, 0x70, 0xf3, 0x0f, 0x82, 0xc5,
+ 0xec, 0x03, 0x64, 0x31, 0x97, 0xf2, 0x80, 0x1e, 0x94, 0x76, 0x04, 0x7b, 0x32, 0x62, 0x27, 0xfa,
+ 0x4f, 0x31, 0xef, 0xed, 0x6f, 0xaa, 0xc3, 0x9f, 0x3e, 0x24, 0xe1, 0x12, 0x7d, 0xc4, 0x56, 0x38,
+ 0xf1, 0x98, 0xf3, 0x8e, 0xb1, 0x9f, 0xe8, 0x43, 0x90, 0xa3, 0x72, 0xba, 0xc2, 0xcc, 0x9c, 0xee,
+ 0x79, 0x75, 0xcc, 0x7f, 0x93, 0xd7, 0xe1, 0x5e, 0xec, 0x60, 0x85, 0x52, 0x94, 0x83, 0x7d, 0xe4,
+ 0xb0, 0xe7, 0x2f, 0x1f, 0x3d, 0x5a, 0x4b, 0x96, 0x66, 0xb5, 0xa8, 0x34, 0xeb, 0xd2, 0x77, 0x12,
+ 0x75, 0x19, 0xfe, 0x83, 0x05, 0x4b, 0x97, 0x29, 0x4b, 0x66, 0x63, 0x5f, 0x21, 0xe5, 0xe3, 0x97,
+ 0xe0, 0xa8, 0x71, 0x7f, 0x25, 0xa7, 0x27, 0x53, 0x29, 0xd8, 0xb1, 0x58, 0x52, 0x42, 0x06, 0xaa,
+ 0xb2, 0x4d, 0x66, 0x5f, 0x37, 0xa0, 0x6a, 0x2c, 0xa2, 0x8b, 0xa9, 0xbc, 0x6b, 0x39, 0xd5, 0xd0,
+ 0xe5, 0xb9, 0x43, 0xbb, 0xae, 0x78, 0x92, 0xf5, 0xab, 0xca, 0xaa, 0xa3, 0x1c, 0x65, 0x1b, 0x90,
+ 0x50, 0xac, 0x20, 0x6b, 0x46, 0x49, 0x01, 0x7d, 0x39, 0x4a, 0xc0, 0xa2, 0x39, 0x7a, 0x18, 0xf2,
+ 0x81, 0x7f, 0x4f, 0x27, 0xd4, 0x0b, 0xf1, 0x91, 0xc4, 0xbf, 0x47, 0xc4, 0x12, 0x7e, 0x0e, 0x72,
+ 0xc4, 0xbf, 0x87, 0x9a, 0x00, 0x81, 0xe3, 0xf5, 0xe8, 0xad, 0xa8, 0x94, 0xab, 0x11, 0x03, 0x72,
+ 0x40, 0x06, 0xb3, 0x01, 0x47, 0xcd, 0x1b, 0x49, 0x75, 0xaf, 0x43, 0xe9, 0xd5, 0xb1, 0x29, 0xae,
+ 0x7a, 0x4a, 0x5c, 0xb2, 0x63, 0xa0, 0x91, 0xb8, 0xcd, 0x40, 0x0c, 0x47, 0xa7, 0xa0, 0xc2, 0x9c,
+ 0xdb, 0x03, 0x7a, 0x2d, 0x76, 0x96, 0x31, 0x80, 0xaf, 0xf2, 0x2a, 0xf4, 0x96, 0x91, 0x8a, 0xc5,
+ 0x00, 0xf4, 0x38, 0x2c, 0xc5, 0x77, 0xbe, 0x11, 0xd0, 0x1d, 0xf7, 0x1d, 0xa1, 0xe1, 0x1a, 0xd9,
+ 0x07, 0x47, 0x6b, 0x70, 0x24, 0x86, 0x6d, 0x8b, 0x94, 0x27, 0x2f, 0x50, 0xd3, 0x60, 0x2e, 0x1b,
+ 0xc1, 0xee, 0x8b, 0x77, 0xc7, 0xce, 0x40, 0x3c, 0xd3, 0x1a, 0x31, 0x20, 0xf8, 0x8f, 0x16, 0x1c,
+ 0x95, 0xaa, 0xe6, 0x6f, 0xe0, 0xab, 0x68, 0xf5, 0xbf, 0xb4, 0x00, 0x99, 0x1c, 0x28, 0xd3, 0xfa,
+ 0x7f, 0xb3, 0x23, 0xc5, 0x73, 0xaa, 0xaa, 0x28, 0xae, 0x25, 0x28, 0x6e, 0x2a, 0x61, 0x28, 0x76,
+ 0x64, 0xe7, 0x4d, 0xb4, 0xd0, 0x65, 0xf5, 0x2e, 0x21, 0x44, 0x7d, 0x23, 0x1b, 0x0a, 0xb7, 0x27,
+ 0x8c, 0x86, 0xaa, 0xf6, 0x16, 0x4d, 0x07, 0x01, 0x20, 0xf2, 0x8b, 0x9f, 0x45, 0x3d, 0x26, 0xac,
+ 0x26, 0x1f, 0x9f, 0xa5, 0x40, 0x44, 0x0f, 0xf0, 0xbf, 0xb2, 0xb0, 0x70, 0xcb, 0x1f, 0x8c, 0xe3,
+ 0xf0, 0xfa, 0x55, 0x0a, 0x2d, 0x89, 0x86, 0x40, 0x41, 0x37, 0x04, 0x10, 0xe4, 0x43, 0x46, 0x47,
+ 0xc2, 0xb2, 0x72, 0x44, 0x8c, 0x11, 0x86, 0x1a, 0x73, 0x82, 0x1e, 0x65, 0xb2, 0xcc, 0x6a, 0x14,
+ 0x45, 0xfe, 0x9b, 0x80, 0xa1, 0x55, 0xa8, 0x3a, 0xbd, 0x5e, 0x40, 0x7b, 0x0e, 0xa3, 0xed, 0x49,
+ 0xa3, 0x24, 0x0e, 0x33, 0x41, 0xe8, 0x2a, 0x2c, 0x76, 0x9c, 0x4e, 0xdf, 0xf5, 0x7a, 0xd7, 0x47,
+ 0xcc, 0xf5, 0xbd, 0xb0, 0x51, 0x16, 0x1e, 0xfc, 0xd4, 0xba, 0xf9, 0x93, 0xd4, 0xfa, 0x46, 0x02,
+ 0x47, 0xf9, 0xb1, 0xd4, 0x4e, 0xfc, 0x06, 0x2c, 0x6a, 0xc1, 0x2b, 0xf3, 0x38, 0x0b, 0xa5, 0xb7,
+ 0x05, 0x64, 0x46, 0xb3, 0x4f, 0xa2, 0x2a, 0x52, 0x1a, 0x2d, 0xf9, 0xa3, 0x86, 0xe6, 0x1f, 0x5f,
+ 0x85, 0xa2, 0x44, 0x47, 0xa7, 0xcc, 0xc2, 0x4b, 0xe6, 0x9e, 0x7c, 0xae, 0xaa, 0x28, 0x0c, 0x45,
+ 0x49, 0x48, 0x19, 0x91, 0xb0, 0x33, 0x09, 0x21, 0xea, 0x1b, 0xff, 0x28, 0x0b, 0xc7, 0x36, 0x29,
+ 0xa3, 0x1d, 0x46, 0xbb, 0x97, 0x5c, 0x3a, 0xe8, 0x7e, 0xa1, 0x3d, 0x81, 0xa8, 0xb3, 0x97, 0x33,
+ 0x3a, 0x7b, 0xdc, 0x87, 0x0d, 0x5c, 0x8f, 0x6e, 0x19, 0xad, 0xa1, 0x18, 0x10, 0xcb, 0xa8, 0x60,
+ 0x36, 0x8d, 0xb4, 0x8d, 0x14, 0x0d, 0x1b, 0x89, 0x1b, 0x82, 0xa5, 0x44, 0x0f, 0x53, 0x57, 0xa0,
+ 0xe5, 0xb8, 0x7c, 0xc5, 0xbf, 0xb3, 0xe0, 0x78, 0x5a, 0x2e, 0x4a, 0x8d, 0x2f, 0x42, 0x71, 0x47,
+ 0x40, 0xf6, 0xb7, 0x9d, 0x13, 0x3b, 0x64, 0xe7, 0x42, 0xa2, 0x9a, 0x9d, 0x0b, 0x09, 0x41, 0x8f,
+ 0x25, 0x7e, 0xb0, 0x6a, 0x2f, 0xef, 0x4d, 0xed, 0x23, 0x02, 0x60, 0xe0, 0x2a, 0x66, 0xce, 0x44,
+ 0x17, 0xcf, 0xc5, 0x2d, 0x11, 0x09, 0x31, 0x09, 0xab, 0xfe, 0xe6, 0x9f, 0x2d, 0x58, 0x48, 0x5c,
+ 0x44, 0x88, 0x88, 0x3f, 0x01, 0x15, 0x1e, 0xe4, 0x04, 0x3d, 0x06, 0x79, 0x36, 0x19, 0xa9, 0xa8,
+ 0xd0, 0x3e, 0xf6, 0xd9, 0xd4, 0x3e, 0x9a, 0xd8, 0x76, 0x73, 0x32, 0xa2, 0x44, 0xa0, 0xf0, 0x97,
+ 0xd3, 0x71, 0x82, 0xae, 0xeb, 0x39, 0x03, 0x97, 0x49, 0xed, 0xe4, 0x89, 0x09, 0xe2, 0xee, 0x68,
+ 0xe4, 0x04, 0xa1, 0x4e, 0x02, 0x2b, 0xd2, 0x1d, 0x29, 0x10, 0xd1, 0x03, 0xd1, 0xdc, 0xb9, 0x43,
+ 0x59, 0xa7, 0x2f, 0xc3, 0x82, 0x6a, 0xee, 0x08, 0x48, 0xa2, 0xb9, 0x23, 0x20, 0xf8, 0xe7, 0x56,
+ 0x6c, 0x9c, 0xf2, 0x0d, 0x7f, 0xe9, 0x8c, 0x13, 0x7f, 0x3b, 0xb6, 0x13, 0x7d, 0x45, 0x65, 0x27,
+ 0x2f, 0xc0, 0x62, 0x37, 0xb1, 0x72, 0xb0, 0xbd, 0xc8, 0xc6, 0x75, 0x0a, 0x1d, 0x8f, 0x63, 0x3d,
+ 0x0a, 0xc8, 0x01, 0x7a, 0x4c, 0x29, 0x27, 0xbb, 0x5f, 0x39, 0xb1, 0xd4, 0x73, 0x87, 0x4b, 0xfd,
+ 0xf1, 0x47, 0xa0, 0x12, 0xfd, 0x48, 0x89, 0xaa, 0x50, 0xba, 0x74, 0x9d, 0xbc, 0x7e, 0x91, 0x6c,
+ 0x2e, 0x65, 0x50, 0x0d, 0xca, 0xed, 0x8b, 0x1b, 0x2f, 0x8b, 0x99, 0x75, 0xfe, 0xd7, 0x45, 0x9d,
+ 0xb8, 0x04, 0xe8, 0x1b, 0x50, 0x90, 0xd9, 0xc8, 0xf1, 0x98, 0x39, 0xf3, 0xf7, 0xbb, 0x95, 0x13,
+ 0xfb, 0xe0, 0x52, 0x4a, 0x38, 0x73, 0xd6, 0x42, 0xd7, 0xa0, 0x2a, 0x80, 0xaa, 0x43, 0x7e, 0x2a,
+ 0xdd, 0xa8, 0x4e, 0x50, 0x7a, 0xe8, 0x80, 0x55, 0x83, 0xde, 0x05, 0x28, 0x48, 0x81, 0x1d, 0x4f,
+ 0x25, 0x8d, 0x33, 0x6e, 0x93, 0xf8, 0xcd, 0x00, 0x67, 0xd0, 0xb3, 0x90, 0xbf, 0xe9, 0xb8, 0x03,
+ 0x64, 0xe4, 0xac, 0x46, 0x63, 0x7b, 0xe5, 0x78, 0x1a, 0x6c, 0x1c, 0xfb, 0x7c, 0xd4, 0x9f, 0x3f,
+ 0x91, 0x6e, 0x12, 0xea, 0xed, 0x8d, 0xfd, 0x0b, 0xd1, 0xc9, 0xd7, 0x65, 0x17, 0x59, 0xb7, 0xaa,
+ 0xd0, 0x43, 0xc9, 0xa3, 0x52, 0x9d, 0xad, 0x95, 0xe6, 0x41, 0xcb, 0x11, 0xc1, 0x2d, 0xa8, 0x1a,
+ 0x6d, 0x22, 0x53, 0xac, 0xfb, 0x7b, 0x5c, 0xa6, 0x58, 0x67, 0xf4, 0x96, 0x70, 0x06, 0x5d, 0x86,
+ 0x32, 0xcf, 0xf4, 0xc5, 0xcf, 0x49, 0x27, 0xd3, 0x09, 0xbd, 0x91, 0xc8, 0xad, 0x9c, 0x9a, 0xbd,
+ 0x18, 0x11, 0xfa, 0x16, 0x54, 0x2e, 0x53, 0xa6, 0x22, 0xd8, 0x89, 0x74, 0x08, 0x9c, 0x21, 0xa9,
+ 0x64, 0x18, 0xc5, 0x19, 0xf4, 0x86, 0x28, 0x3a, 0x92, 0xee, 0x19, 0xd9, 0x07, 0xb8, 0xe1, 0xe8,
+ 0x5e, 0xab, 0x07, 0x23, 0x44, 0x94, 0x5f, 0x4f, 0x50, 0x56, 0x79, 0x83, 0x7d, 0xc0, 0x83, 0x8d,
+ 0x28, 0xdb, 0x87, 0xfc, 0xd9, 0x04, 0x67, 0xce, 0xbf, 0xa9, 0xff, 0x6f, 0xb1, 0xe9, 0x30, 0x07,
+ 0x5d, 0x87, 0x45, 0x21, 0xcb, 0xe8, 0x0f, 0x19, 0x09, 0x9b, 0xdf, 0xf7, 0xef, 0x8f, 0x84, 0xcd,
+ 0xef, 0xff, 0x17, 0x08, 0xce, 0xb4, 0xdf, 0xfc, 0xe0, 0xe3, 0x66, 0xe6, 0xc3, 0x8f, 0x9b, 0x99,
+ 0x4f, 0x3f, 0x6e, 0x5a, 0xdf, 0xdb, 0x6d, 0x5a, 0xbf, 0xda, 0x6d, 0x5a, 0xef, 0xef, 0x36, 0xad,
+ 0x0f, 0x76, 0x9b, 0xd6, 0xdf, 0x76, 0x9b, 0xd6, 0xdf, 0x77, 0x9b, 0x99, 0x4f, 0x77, 0x9b, 0xd6,
+ 0xbb, 0x9f, 0x34, 0x33, 0x1f, 0x7c, 0xd2, 0xcc, 0x7c, 0xf8, 0x49, 0x33, 0xf3, 0x9d, 0x47, 0x0f,
+ 0x2f, 0xc5, 0xa5, 0x5b, 0x2c, 0x8a, 0xaf, 0x27, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xc9,
+ 0x6a, 0x9e, 0x47, 0x24, 0x00, 0x00,
}
func (x Direction) String() string {
@@ -4540,6 +4549,9 @@ func (this *GetChunkRefResponse) Equal(that interface{}) bool {
return false
}
}
+ if !this.Stats.Equal(&that1.Stats) {
+ return false
+ }
return true
}
func (this *GetSeriesRequest) Equal(that interface{}) bool {
@@ -5610,11 +5622,12 @@ func (this *GetChunkRefResponse) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 5)
+ s := make([]string, 0, 6)
s = append(s, "&logproto.GetChunkRefResponse{")
if this.Refs != nil {
s = append(s, "Refs: "+fmt.Sprintf("%#v", this.Refs)+",\n")
}
+ s = append(s, "Stats: "+strings.Replace(this.Stats.GoString(), `&`, ``, 1)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -8116,6 +8129,16 @@ func (m *GetChunkRefResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLogproto(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
if len(m.Refs) > 0 {
for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -8718,21 +8741,21 @@ func (m *DetectedFieldsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x1a
}
- n25, err25 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err25 != nil {
- return 0, err25
- }
- i -= n25
- i = encodeVarintLogproto(dAtA, i, uint64(n25))
- i--
- dAtA[i] = 0x12
- n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
if err26 != nil {
return 0, err26
}
i -= n26
i = encodeVarintLogproto(dAtA, i, uint64(n26))
i--
+ dAtA[i] = 0x12
+ n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err27 != nil {
+ return 0, err27
+ }
+ i -= n27
+ i = encodeVarintLogproto(dAtA, i, uint64(n27))
+ i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
@@ -8873,21 +8896,21 @@ func (m *DetectedLabelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x1a
}
- n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
- if err27 != nil {
- return 0, err27
- }
- i -= n27
- i = encodeVarintLogproto(dAtA, i, uint64(n27))
- i--
- dAtA[i] = 0x12
- n28, err28 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ n28, err28 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
if err28 != nil {
return 0, err28
}
i -= n28
i = encodeVarintLogproto(dAtA, i, uint64(n28))
i--
+ dAtA[i] = 0x12
+ n29, err29 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err29 != nil {
+ return 0, err29
+ }
+ i -= n29
+ i = encodeVarintLogproto(dAtA, i, uint64(n29))
+ i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
@@ -9699,6 +9722,8 @@ func (m *GetChunkRefResponse) Size() (n int) {
n += 1 + l + sovLogproto(uint64(l))
}
}
+ l = m.Stats.Size()
+ n += 1 + l + sovLogproto(uint64(l))
return n
}
@@ -10569,6 +10594,7 @@ func (this *GetChunkRefResponse) String() string {
repeatedStringForRefs += "}"
s := strings.Join([]string{`&GetChunkRefResponse{`,
`Refs:` + repeatedStringForRefs + `,`,
+ `Stats:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Stats), "Index", "stats.Index", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -15701,6 +15727,39 @@ func (m *GetChunkRefResponse) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogproto(dAtA[iNdEx:])
diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
index 5bf2d37e2d6bf..543a346cc5c7d 100644
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -352,6 +352,7 @@ message GetChunkRefRequest {
message GetChunkRefResponse {
repeated ChunkRef refs = 1;
+ stats.Index stats = 2 [(gogoproto.nullable) = false];
}
message GetSeriesRequest {
diff --git a/pkg/logql/log/fmt.go b/pkg/logql/log/fmt.go
index c69aa3d40bb01..74eb0262d39d2 100644
--- a/pkg/logql/log/fmt.go
+++ b/pkg/logql/log/fmt.go
@@ -391,9 +391,9 @@ func (lf *LabelsFormatter) Process(ts int64, l []byte, lbs *LabelsBuilder) ([]by
defer smp.Put(m)
for _, f := range lf.formats {
if f.Rename {
- v, category, ok := lbs.GetWithCategory(f.Value)
+ v, _, ok := lbs.GetWithCategory(f.Value)
if ok {
- lbs.Set(category, f.Name, v)
+ lbs.Set(ParsedLabel, f.Name, v)
lbs.Del(f.Value)
}
continue
diff --git a/pkg/logql/log/fmt_test.go b/pkg/logql/log/fmt_test.go
index 2028d2e00bf8f..9cb449a7bfddf 100644
--- a/pkg/logql/log/fmt_test.go
+++ b/pkg/logql/log/fmt_test.go
@@ -515,6 +515,22 @@ func Test_labelsFormatter_Format(t *testing.T) {
in labels.Labels
want labels.Labels
}{
+ {
+ "rename label",
+ mustNewLabelsFormatter([]LabelFmt{
+ NewRenameLabelFmt("baz", "foo"),
+ }),
+ labels.FromStrings("foo", "blip", "bar", "blop"),
+ labels.FromStrings("bar", "blop", "baz", "blip"),
+ },
+ {
+ "rename and overwrite existing label",
+ mustNewLabelsFormatter([]LabelFmt{
+ NewRenameLabelFmt("bar", "foo"),
+ }),
+ labels.FromStrings("foo", "blip", "bar", "blop"),
+ labels.FromStrings("bar", "blip"),
+ },
{
"combined with template",
mustNewLabelsFormatter([]LabelFmt{NewTemplateLabelFmt("foo", "{{.foo}} and {{.bar}}")}),
diff --git a/pkg/logql/log/parser.go b/pkg/logql/log/parser.go
index c8e65061ba41d..50d973eb8b7db 100644
--- a/pkg/logql/log/parser.go
+++ b/pkg/logql/log/parser.go
@@ -625,6 +625,8 @@ func (j *JSONExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder)
switch typ {
case jsonparser.Null:
lbs.Set(ParsedLabel, key, "")
+ case jsonparser.Object:
+ lbs.Set(ParsedLabel, key, string(data))
default:
lbs.Set(ParsedLabel, key, unescapeJSONString(data))
}
diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go
index 5ac57b9ef0540..5ac3a87503634 100644
--- a/pkg/logql/log/parser_test.go
+++ b/pkg/logql/log/parser_test.go
@@ -542,13 +542,35 @@ func TestJSONExpressionParser(t *testing.T) {
),
NoParserHints(),
},
+ {
+ "nested object with escaped value",
+ []byte(`{"app":{"name":"great \"loki\""}`),
+ []LabelExtractionExpr{
+ NewLabelExtractionExpr("app", `app`),
+ },
+ labels.FromStrings("foo", "bar"),
+ labels.FromStrings("foo", "bar",
+ "app", `{"name":"great \"loki\""}`,
+ ),
+ NoParserHints(),
+ },
+ {
+ "field with escaped value inside the json string",
+ []byte(`{"app":"{\"name\":\"great \\\"loki\\\"\"}"}`),
+ []LabelExtractionExpr{
+ NewLabelExtractionExpr("app", `app`),
+ },
+ labels.FromStrings("foo", "bar"),
+ labels.FromStrings("foo", "bar",
+ "app", `{"name":"great \"loki\""}`,
+ ),
+ NoParserHints(),
+ },
}
for _, tt := range tests {
- j, err := NewJSONExpressionParser(tt.expressions)
- if err != nil {
- t.Fatalf("cannot create JSON expression parser: %s", err.Error())
- }
t.Run(tt.name, func(t *testing.T) {
+ j, err := NewJSONExpressionParser(tt.expressions)
+ require.NoError(t, err, "cannot create JSON expression parser")
b := NewBaseLabelsBuilderWithGrouping(nil, tt.hints, false, false).ForLabels(tt.lbs, tt.lbs.Hash())
b.Reset()
_, _ = j.Process(0, tt.line, b)
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index a0509be31f6d2..42d07635edaa3 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -103,6 +103,11 @@ func (c *Context) Store() Store {
return c.store
}
+// Index returns the index statistics accumulated so far.
+func (c *Context) Index() Index {
+ return c.index
+}
+
// Caches returns the cache statistics accumulated so far.
func (c *Context) Caches() Caches {
return Caches{
@@ -402,6 +407,14 @@ func (c *Context) AddChunksRef(i int64) {
atomic.AddInt64(&c.store.TotalChunksRef, i)
}
+func (c *Context) AddIndexTotalChunkRefs(i int64) {
+ atomic.AddInt64(&c.index.TotalChunks, i)
+}
+
+func (c *Context) AddIndexPostFilterChunkRefs(i int64) {
+ atomic.AddInt64(&c.index.PostFilterChunks, i)
+}
+
// AddCacheEntriesFound counts the number of cache entries requested and found
func (c *Context) AddCacheEntriesFound(t CacheType, i int) {
stats := c.getCacheStatsByType(t)
diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go
index b7bb08e2cd46e..f1a3c58055d0e 100644
--- a/pkg/loki/common/common.go
+++ b/pkg/loki/common/common.go
@@ -6,6 +6,7 @@ import (
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/netutil"
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/alibaba"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/aws"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/azure"
@@ -78,6 +79,7 @@ type Storage struct {
Hedging hedging.Config `yaml:"hedging"`
COS ibmcloud.COSConfig `yaml:"cos"`
CongestionControl congestion.Config `yaml:"congestion_control,omitempty"`
+ ObjectStore bucket.Config `yaml:"object_store" doc:"hidden"`
}
func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
@@ -91,6 +93,8 @@ func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
s.Hedging.RegisterFlagsWithPrefix(prefix, f)
s.COS.RegisterFlagsWithPrefix(prefix, f)
s.CongestionControl.RegisterFlagsWithPrefix(prefix, f)
+
+ s.ObjectStore.RegisterFlagsWithPrefix(prefix+"object-store.", f)
}
type FilesystemConfig struct {
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go
index 16d25c1ff5e89..a09792cd403ae 100644
--- a/pkg/loki/config_wrapper.go
+++ b/pkg/loki/config_wrapper.go
@@ -566,6 +566,12 @@ func applyStorageConfig(cfg, defaults *ConfigWrapper) error {
}
}
+ if !reflect.DeepEqual(cfg.Common.Storage.ObjectStore, defaults.StorageConfig.ObjectStore) {
+ applyConfig = func(r *ConfigWrapper) {
+ r.StorageConfig.ObjectStore = r.Common.Storage.ObjectStore
+ }
+ }
+
if configsFound > 1 {
return ErrTooManyStorageConfigs
}
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index 5e1ad00bec501..47d92cd8a92cb 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -736,7 +736,7 @@ ruler:
})
t.Run("explicit storage config provided via config file is preserved", func(t *testing.T) {
- specificRulerConfig := `common:
+ explicitStorageConfig := `common:
storage:
gcs:
bucket_name: foobar
@@ -749,7 +749,7 @@ storage_config:
access_key_id: abc123
secret_access_key: def789`
- config, defaults := testContext(specificRulerConfig, nil)
+ config, defaults := testContext(explicitStorageConfig, nil)
assert.Equal(t, "s3://foo-bucket", config.StorageConfig.AWSStorageConfig.S3Config.Endpoint)
assert.Equal(t, "us-east1", config.StorageConfig.AWSStorageConfig.S3Config.Region)
@@ -765,6 +765,43 @@ storage_config:
assert.EqualValues(t, defaults.Ruler.StoreConfig.S3, config.Ruler.StoreConfig.S3)
})
+ t.Run("when common object_store config is provided, storage_config and rulers should use it", func(t *testing.T) {
+ commonStorageConfig := `common:
+ storage:
+ object_store:
+ gcs:
+ bucket_name: foobar
+ chunk_buffer_size: 17`
+
+ config, _ := testContext(commonStorageConfig, nil)
+
+ assert.Equal(t, "foobar", config.StorageConfig.ObjectStore.GCS.BucketName)
+ assert.Equal(t, 17, config.StorageConfig.ObjectStore.GCS.ChunkBufferSize)
+
+ // TODO: common config should be set on ruler bucket config
+ })
+
+ t.Run("explicit thanos object storage config provided via config file is preserved", func(t *testing.T) {
+ explicitStorageConfig := `common:
+ storage:
+ object_store:
+ gcs:
+ bucket_name: foobar
+ chunk_buffer_size: 17
+storage_config:
+ object_store:
+ gcs:
+ bucket_name: barfoo
+ chunk_buffer_size: 27`
+
+ config, _ := testContext(explicitStorageConfig, nil)
+
+ assert.Equal(t, "barfoo", config.StorageConfig.ObjectStore.GCS.BucketName)
+ assert.Equal(t, 27, config.StorageConfig.ObjectStore.GCS.ChunkBufferSize)
+
+ // TODO: common config should be set on ruler bucket config
+ })
+
t.Run("named storage config provided via config file is preserved", func(t *testing.T) {
namedStoresConfig := `common:
storage:
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index f59218307e7d7..cd12d6654519b 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -92,6 +92,7 @@ type Config struct {
Frontend lokifrontend.Config `yaml:"frontend,omitempty"`
QueryRange queryrange.Config `yaml:"query_range,omitempty"`
Ruler ruler.Config `yaml:"ruler,omitempty"`
+ RulerStorage rulestore.Config `yaml:"ruler_storage,omitempty" doc:"hidden"`
IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"`
IngesterRF1Client ingester_client.Config `yaml:"ingester_rf1_client,omitempty"`
Ingester ingester.Config `yaml:"ingester,omitempty"`
@@ -179,6 +180,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.TableManager.RegisterFlags(f)
c.Frontend.RegisterFlags(f)
c.Ruler.RegisterFlags(f)
+ c.RulerStorage.RegisterFlags(f)
c.Worker.RegisterFlags(f)
c.QueryRange.RegisterFlags(f)
c.RuntimeConfig.RegisterFlags(f)
@@ -262,6 +264,9 @@ func (c *Config) Validate() error {
if err := c.Ruler.Validate(); err != nil {
errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid ruler config"))
}
+ if err := c.RulerStorage.Validate(); err != nil {
+ errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid ruler_storage config"))
+ }
if err := c.Ingester.Validate(); err != nil {
errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid ingester config"))
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 24e19d7c58e05..8e5c76e8828de 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -638,6 +638,7 @@ func (t *Loki) initPatternIngester() (_ services.Service, err error) {
t.Cfg.Pattern.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort
t.PatternIngester, err = pattern.New(
t.Cfg.Pattern,
+ t.Overrides,
t.PatternRingClient,
t.Cfg.MetricsNamespace,
prometheus.DefaultRegisterer,
@@ -728,8 +729,7 @@ func (t *Loki) initTableManager() (services.Service, error) {
}
reg := prometheus.WrapRegistererWith(prometheus.Labels{"component": "table-manager-store"}, prometheus.DefaultRegisterer)
-
- tableClient, err := storage.NewTableClient(lastConfig.IndexType, *lastConfig, t.Cfg.StorageConfig, t.ClientMetrics, reg, util_log.Logger)
+ tableClient, err := storage.NewTableClient(lastConfig.IndexType, "table-manager", *lastConfig, t.Cfg.StorageConfig, t.ClientMetrics, reg, util_log.Logger)
if err != nil {
return nil, err
}
@@ -1232,7 +1232,8 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) {
// to determine if it's unconfigured. the following check, however, correctly tests this.
// Single binary integration tests will break if this ever drifts
legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isTarget(Read)
- if (t.Cfg.isTarget(All) || legacyReadMode || t.Cfg.isTarget(Backend)) && t.Cfg.Ruler.StoreConfig.IsDefaults() {
+ storageNotConfigured := (t.Cfg.StorageConfig.UseThanosObjstore && t.Cfg.RulerStorage.IsDefaults()) || t.Cfg.Ruler.StoreConfig.IsDefaults()
+ if (t.Cfg.isTarget(All) || legacyReadMode || t.Cfg.isTarget(Backend)) && storageNotConfigured {
level.Info(util_log.Logger).Log("msg", "Ruler storage is not configured; ruler will not be started.")
return
}
@@ -1245,7 +1246,11 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) {
}
}
- t.RulerStorage, err = base_ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, t.Cfg.StorageConfig.Hedging, t.ClientMetrics, ruler.GroupLoader{}, util_log.Logger)
+ if t.Cfg.StorageConfig.UseThanosObjstore {
+ t.RulerStorage, err = base_ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, ruler.GroupLoader{}, util_log.Logger)
+ } else {
+ t.RulerStorage, err = base_ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, t.Cfg.StorageConfig.Hedging, t.ClientMetrics, ruler.GroupLoader{}, util_log.Logger)
+ }
return
}
@@ -1419,7 +1424,7 @@ func (t *Loki) initCompactor() (services.Service, error) {
continue
}
- objectClient, err := storage.NewObjectClient(periodConfig.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics)
+ objectClient, err := storage.NewObjectClient(periodConfig.ObjectType, "compactor", t.Cfg.StorageConfig, t.ClientMetrics)
if err != nil {
return nil, fmt.Errorf("failed to create object client: %w", err)
}
@@ -1430,7 +1435,8 @@ func (t *Loki) initCompactor() (services.Service, error) {
var deleteRequestStoreClient client.ObjectClient
if t.Cfg.CompactorConfig.RetentionEnabled {
if deleteStore := t.Cfg.CompactorConfig.DeleteRequestStore; deleteStore != "" {
- if deleteRequestStoreClient, err = storage.NewObjectClient(deleteStore, t.Cfg.StorageConfig, t.ClientMetrics); err != nil {
+ deleteRequestStoreClient, err = storage.NewObjectClient(deleteStore, "delete-store", t.Cfg.StorageConfig, t.ClientMetrics)
+ if err != nil {
return nil, fmt.Errorf("failed to create delete request store object client: %w", err)
}
} else {
@@ -1495,7 +1501,7 @@ func (t *Loki) initIndexGateway() (services.Service, error) {
}
tableRange := period.GetIndexTableNumberRange(periodEndTime)
- indexClient, err := storage.NewIndexClient(period, tableRange, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.Overrides, t.ClientMetrics, shardingStrategy,
+ indexClient, err := storage.NewIndexClient("index-store", period, tableRange, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.Overrides, t.ClientMetrics, shardingStrategy,
prometheus.DefaultRegisterer, log.With(util_log.Logger, "index-store", fmt.Sprintf("%s-%s", period.IndexType, period.From.String())), t.Cfg.MetricsNamespace,
)
if err != nil {
@@ -1741,7 +1747,7 @@ func (t *Loki) initAnalytics() (services.Service, error) {
return nil, err
}
- objectClient, err := storage.NewObjectClient(period.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics)
+ objectClient, err := storage.NewObjectClient(period.ObjectType, "analytics", t.Cfg.StorageConfig, t.ClientMetrics)
if err != nil {
level.Info(util_log.Logger).Log("msg", "failed to initialize usage report", "err", err)
return nil, nil
diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go
index 9e6062432cc6a..fcfd017884fed 100644
--- a/pkg/pattern/drain/drain.go
+++ b/pkg/pattern/drain/drain.go
@@ -47,6 +47,10 @@ type Config struct {
MaxAllowedLineLength int
}
+type Limits interface {
+ PatternIngesterTokenizableJSONFields(userID string) []string
+}
+
func createLogClusterCache(maxSize int, onEvict func(int, *LogCluster)) *LogClusterCache {
if maxSize == 0 {
maxSize = math.MaxInt
@@ -135,7 +139,7 @@ func DefaultConfig() *Config {
}
}
-func New(config *Config, format string, metrics *Metrics) *Drain {
+func New(tenantID string, config *Config, limits Limits, format string, metrics *Metrics) *Drain {
if config.LogClusterDepth < 3 {
panic("depth argument must be at least 3")
}
@@ -153,7 +157,8 @@ func New(config *Config, format string, metrics *Metrics) *Drain {
var tokenizer LineTokenizer
switch format {
case FormatJSON:
- tokenizer = newJSONTokenizer(config.ParamString, config.MaxAllowedLineLength)
+ fieldsToTokenize := limits.PatternIngesterTokenizableJSONFields(tenantID)
+ tokenizer = newJSONTokenizer(config.ParamString, config.MaxAllowedLineLength, fieldsToTokenize)
case FormatLogfmt:
tokenizer = newLogfmtTokenizer(config.ParamString, config.MaxAllowedLineLength)
default:
diff --git a/pkg/pattern/drain/drain_benchmark_test.go b/pkg/pattern/drain/drain_benchmark_test.go
index e7c95f721ed4c..5313f10db396d 100644
--- a/pkg/pattern/drain/drain_benchmark_test.go
+++ b/pkg/pattern/drain/drain_benchmark_test.go
@@ -35,7 +35,7 @@ func BenchmarkDrain_TrainExtractsPatterns(b *testing.B) {
line := scanner.Text()
lines = append(lines, line)
}
- drain := New(DefaultConfig(), DetectLogFormat(lines[0]), nil)
+ drain := New("", DefaultConfig(), &fakeLimits{}, DetectLogFormat(lines[0]), nil)
b.ReportAllocs()
b.ResetTimer()
diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go
index 9359feb8dd343..c2beda4b44d5f 100644
--- a/pkg/pattern/drain/drain_test.go
+++ b/pkg/pattern/drain/drain_test.go
@@ -14,12 +14,15 @@ import (
"github.com/grafana/loki/v3/pkg/logql/log/pattern"
)
+const (
+ testTenant = "fake"
+)
+
func TestDrain_TrainExtractsPatterns(t *testing.T) {
t.Parallel()
// Set this so the test will print the patterns found, in string slice format for easy copy-paste
outputPatternsForTestUpdate := false
-
tests := []struct {
drain *Drain
inputFile string
@@ -27,7 +30,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
format string
}{
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: `testdata/agent-logfmt.txt`,
format: FormatLogfmt,
patterns: []string{
@@ -56,7 +59,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: `testdata/ingester-logfmt.txt`,
format: FormatLogfmt,
patterns: []string{
@@ -66,7 +69,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: `testdata/drone-json.txt`,
format: FormatJSON,
patterns: []string{
@@ -79,7 +82,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/distributor-logfmt.txt",
format: FormatLogfmt,
patterns: []string{
@@ -91,7 +94,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/journald.txt",
format: FormatUnknown,
patterns: []string{
@@ -211,7 +214,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/kafka.txt",
format: FormatUnknown,
patterns: []string{
@@ -232,7 +235,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/kubernetes.txt",
format: FormatUnknown,
patterns: []string{
@@ -273,7 +276,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/vault.txt",
format: FormatUnknown,
patterns: []string{
@@ -281,7 +284,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/calico.txt",
format: FormatUnknown,
patterns: []string{
@@ -374,7 +377,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
},
},
{
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputFile: "testdata/grafana-ruler.txt",
format: FormatLogfmt,
patterns: []string{
@@ -470,7 +473,7 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
}{
{
name: "should extract patterns that all lines match",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
"test 1 test test",
"test 2 test test",
@@ -480,7 +483,7 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
{
name: "should extract patterns that match if line ends with newlines",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
`test 1 test test
`,
@@ -494,7 +497,7 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
{
name: "should extract patterns that match if line ends with empty space",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
`test 1 test test `,
`test 2 test test `,
@@ -504,7 +507,7 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
{
name: "should extract patterns that match if line starts with empty space",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
` test 1 test test`,
` test 2 test test`,
@@ -514,7 +517,7 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
{
name: "Scheduler patterns are matchable",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
`ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
`ts=2024-05-30T12:50:36.350575929Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
@@ -611,7 +614,7 @@ func TestDrain_PruneTreeClearsOldBranches(t *testing.T) {
}{
{
name: "should prune old branches",
- drain: New(DefaultConfig(), "", nil),
+ drain: New(testTenant, DefaultConfig(), &fakeLimits{}, "", nil),
inputLines: []string{
"test test test A",
"test test test B",
@@ -665,3 +668,11 @@ func countNodes(node *Node) int {
}
return total
}
+
+type fakeLimits struct {
+ Limits
+}
+
+func (f *fakeLimits) PatternIngesterTokenizableJSONFields(_ string) []string {
+ return []string{"log", "message", "msg", "msg_", "_msg", "content"}
+}
diff --git a/pkg/pattern/drain/line_tokenizer.go b/pkg/pattern/drain/line_tokenizer.go
index 87b98afaea6de..4d758181399b4 100644
--- a/pkg/pattern/drain/line_tokenizer.go
+++ b/pkg/pattern/drain/line_tokenizer.go
@@ -263,17 +263,23 @@ func (t *logfmtTokenizer) Clone(tokens []string, _ interface{}) ([]string, inter
type jsonTokenizer struct {
*punctuationTokenizer
- varReplace string
- maxLineLength int
+ varReplace string
+ maxLineLength int
+ fieldsToTokenize []string
}
-func newJSONTokenizer(varReplace string, maxLineLength int) *jsonTokenizer {
- return &jsonTokenizer{newPunctuationTokenizer(maxLineLength), varReplace, maxLineLength}
+func newJSONTokenizer(varReplace string, maxLineLength int, fieldsToTokenize []string) *jsonTokenizer {
+ return &jsonTokenizer{
+ punctuationTokenizer: newPunctuationTokenizer(maxLineLength),
+ varReplace: varReplace,
+ maxLineLength: maxLineLength,
+ fieldsToTokenize: fieldsToTokenize,
+ }
}
func (t *jsonTokenizer) Tokenize(line string, tokens []string, state interface{}) ([]string, interface{}) {
var found []byte
- for _, key := range []string{"log", "message", "msg", "msg_", "_msg", "content"} {
+ for _, key := range t.fieldsToTokenize {
msg, ty, _, err := jsonparser.Get(unsafeBytes(line), key)
if err == nil && ty == jsonparser.String {
found = msg
diff --git a/pkg/pattern/drain/line_tokenizer_test.go b/pkg/pattern/drain/line_tokenizer_test.go
index f825a8d86bbc6..a2c8013b14c3b 100644
--- a/pkg/pattern/drain/line_tokenizer_test.go
+++ b/pkg/pattern/drain/line_tokenizer_test.go
@@ -325,7 +325,8 @@ func TestJsonTokenizer(t *testing.T) {
},
}
- tokenizer := newJSONTokenizer(param, DefaultConfig().MaxAllowedLineLength)
+ fieldsToTokenize := []string{"log", "message", "msg", "msg_", "_msg", "content"}
+ tokenizer := newJSONTokenizer(param, DefaultConfig().MaxAllowedLineLength, fieldsToTokenize)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/pattern/flush_test.go b/pkg/pattern/flush_test.go
index be6a8f3253335..318492f3406cd 100644
--- a/pkg/pattern/flush_test.go
+++ b/pkg/pattern/flush_test.go
@@ -41,7 +41,7 @@ func TestSweepInstance(t *testing.T) {
ring: fakeRing,
}
- ing, err := New(defaultIngesterTestConfig(t), ringClient, "foo", nil, log.NewNopLogger())
+ ing, err := New(defaultIngesterTestConfig(t), &fakeLimits{}, ringClient, "foo", nil, log.NewNopLogger())
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
err = services.StartAndAwaitRunning(context.Background(), ing)
diff --git a/pkg/pattern/ingester.go b/pkg/pattern/ingester.go
index 3c1bb55b76804..60c71920b7d19 100644
--- a/pkg/pattern/ingester.go
+++ b/pkg/pattern/ingester.go
@@ -148,6 +148,10 @@ func (cfg *Config) Validate() error {
return cfg.LifecyclerConfig.Validate()
}
+type Limits interface {
+ drain.Limits
+}
+
type Ingester struct {
services.Service
lifecycler *ring.Lifecycler
@@ -156,6 +160,7 @@ type Ingester struct {
lifecyclerWatcher *services.FailureWatcher
cfg Config
+ limits Limits
registerer prometheus.Registerer
logger log.Logger
@@ -175,6 +180,7 @@ type Ingester struct {
func New(
cfg Config,
+ limits Limits,
ringClient RingClient,
metricsNamespace string,
registerer prometheus.Registerer,
@@ -189,6 +195,7 @@ func New(
i := &Ingester{
cfg: cfg,
+ limits: limits,
ringClient: ringClient,
logger: log.With(logger, "component", "pattern-ingester"),
registerer: registerer,
@@ -416,6 +423,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
i.logger,
i.metrics,
i.drainCfg,
+ i.limits,
i.ringClient,
i.lifecycler.ID,
writer,
diff --git a/pkg/pattern/ingester_test.go b/pkg/pattern/ingester_test.go
index a5dd5cdbaaed4..effa1c1959437 100644
--- a/pkg/pattern/ingester_test.go
+++ b/pkg/pattern/ingester_test.go
@@ -54,6 +54,7 @@ func TestInstancePushQuery(t *testing.T) {
log.NewNopLogger(),
newIngesterMetrics(nil, "test"),
drain.DefaultConfig(),
+ &fakeLimits{},
ringClient,
ingesterID,
mockWriter,
@@ -141,6 +142,7 @@ func TestInstancePushAggregateMetrics(t *testing.T) {
log.NewNopLogger(),
newIngesterMetrics(nil, "test"),
drain.DefaultConfig(),
+ &fakeLimits{},
ringClient,
ingesterID,
mockWriter,
@@ -336,3 +338,11 @@ func (m *mockEntryWriter) WriteEntry(ts time.Time, entry string, lbls labels.Lab
func (m *mockEntryWriter) Stop() {
_ = m.Called()
}
+
+type fakeLimits struct {
+ Limits
+}
+
+func (f *fakeLimits) PatternIngesterTokenizableJSONFields(_ string) []string {
+ return []string{"log", "message", "msg", "msg_", "_msg", "content"}
+}
diff --git a/pkg/pattern/instance.go b/pkg/pattern/instance.go
index 6e3a3de998be5..46c355a9bbcfc 100644
--- a/pkg/pattern/instance.go
+++ b/pkg/pattern/instance.go
@@ -32,16 +32,17 @@ const indexShards = 32
// instance is a tenant instance of the pattern ingester.
type instance struct {
- instanceID string
- buf []byte // buffer used to compute fps.
- mapper *ingester.FpMapper // using of mapper no longer needs mutex because reading from streams is lock-free
- streams *streamsMap
- index *index.BitPrefixInvertedIndex
- logger log.Logger
- metrics *ingesterMetrics
- drainCfg *drain.Config
- ringClient RingClient
- ingesterID string
+ instanceID string
+ buf []byte // buffer used to compute fps.
+ mapper *ingester.FpMapper // using of mapper no longer needs mutex because reading from streams is lock-free
+ streams *streamsMap
+ index *index.BitPrefixInvertedIndex
+ logger log.Logger
+ metrics *ingesterMetrics
+ drainCfg *drain.Config
+ drainLimits drain.Limits
+ ringClient RingClient
+ ingesterID string
aggMetricsLock sync.Mutex
aggMetricsByStreamAndLevel map[string]map[string]*aggregatedMetrics
@@ -59,6 +60,7 @@ func newInstance(
logger log.Logger,
metrics *ingesterMetrics,
drainCfg *drain.Config,
+ drainLimits drain.Limits,
ringClient RingClient,
ingesterID string,
writer aggregation.EntryWriter,
@@ -75,6 +77,7 @@ func newInstance(
index: index,
metrics: metrics,
drainCfg: drainCfg,
+ drainLimits: drainLimits,
ringClient: ringClient,
ingesterID: ingesterID,
aggMetricsByStreamAndLevel: make(map[string]map[string]*aggregatedMetrics),
@@ -220,7 +223,7 @@ func (i *instance) createStream(_ context.Context, pushReqStream logproto.Stream
fp := i.getHashForLabels(labels)
sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp)
firstEntryLine := pushReqStream.Entries[0].Line
- s, err := newStream(fp, sortedLabels, i.metrics, i.logger, drain.DetectLogFormat(firstEntryLine), i.instanceID, i.drainCfg)
+ s, err := newStream(fp, sortedLabels, i.metrics, i.logger, drain.DetectLogFormat(firstEntryLine), i.instanceID, i.drainCfg, i.drainLimits)
if err != nil {
return nil, fmt.Errorf("failed to create stream: %w", err)
}
diff --git a/pkg/pattern/stream.go b/pkg/pattern/stream.go
index 9452def376827..7f53c5777cfeb 100644
--- a/pkg/pattern/stream.go
+++ b/pkg/pattern/stream.go
@@ -35,6 +35,7 @@ func newStream(
guessedFormat string,
instanceID string,
drainCfg *drain.Config,
+ drainLimits drain.Limits,
) (*stream, error) {
return &stream{
fp: fp,
@@ -42,7 +43,7 @@ func newStream(
labelsString: labels.String(),
labelHash: labels.Hash(),
logger: logger,
- patterns: drain.New(drainCfg, guessedFormat, &drain.Metrics{
+ patterns: drain.New(instanceID, drainCfg, drainLimits, guessedFormat, &drain.Metrics{
PatternsEvictedTotal: metrics.patternsDiscardedTotal.WithLabelValues(instanceID, guessedFormat, "false"),
PatternsPrunedTotal: metrics.patternsDiscardedTotal.WithLabelValues(instanceID, guessedFormat, "true"),
PatternsDetectedTotal: metrics.patternsDetectedTotal.WithLabelValues(instanceID, guessedFormat),
diff --git a/pkg/pattern/stream_test.go b/pkg/pattern/stream_test.go
index 201a5566e728f..adf18af33972d 100644
--- a/pkg/pattern/stream_test.go
+++ b/pkg/pattern/stream_test.go
@@ -18,15 +18,7 @@ import (
func TestAddStream(t *testing.T) {
lbs := labels.New(labels.Label{Name: "test", Value: "test"})
- stream, err := newStream(
- model.Fingerprint(lbs.Hash()),
- lbs,
- newIngesterMetrics(nil, "test"),
- log.NewNopLogger(),
- drain.FormatUnknown,
- "123",
- drain.DefaultConfig(),
- )
+ stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs, newIngesterMetrics(nil, "test"), log.NewNopLogger(), drain.FormatUnknown, "123", drain.DefaultConfig(), &fakeLimits{})
require.NoError(t, err)
err = stream.Push(context.Background(), []push.Entry{
@@ -54,15 +46,7 @@ func TestAddStream(t *testing.T) {
func TestPruneStream(t *testing.T) {
lbs := labels.New(labels.Label{Name: "test", Value: "test"})
- stream, err := newStream(
- model.Fingerprint(lbs.Hash()),
- lbs,
- newIngesterMetrics(nil, "test"),
- log.NewNopLogger(),
- drain.FormatUnknown,
- "123",
- drain.DefaultConfig(),
- )
+ stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs, newIngesterMetrics(nil, "test"), log.NewNopLogger(), drain.FormatUnknown, "123", drain.DefaultConfig(), &fakeLimits{})
require.NoError(t, err)
err = stream.Push(context.Background(), []push.Entry{
diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go
index c2548f0e46830..068718f5491a6 100644
--- a/pkg/ruler/base/storage.go
+++ b/pkg/ruler/base/storage.go
@@ -7,13 +7,10 @@ import (
"github.com/go-kit/log"
"github.com/pkg/errors"
- "github.com/prometheus/client_golang/prometheus"
promRules "github.com/prometheus/prometheus/rules"
- configClient "github.com/grafana/loki/v3/pkg/configs/client"
"github.com/grafana/loki/v3/pkg/ruler/rulestore"
"github.com/grafana/loki/v3/pkg/ruler/rulestore/bucketclient"
- "github.com/grafana/loki/v3/pkg/ruler/rulestore/configdb"
"github.com/grafana/loki/v3/pkg/ruler/rulestore/local"
"github.com/grafana/loki/v3/pkg/ruler/rulestore/objectclient"
"github.com/grafana/loki/v3/pkg/storage"
@@ -30,7 +27,6 @@ import (
)
// RuleStoreConfig configures a rule store.
-// TODO remove this legacy config in Cortex 1.11.
type RuleStoreConfig struct {
Type string `yaml:"type"`
@@ -123,29 +119,18 @@ func NewLegacyRuleStore(cfg RuleStoreConfig, hedgeCfg hedging.Config, clientMetr
}
// NewRuleStore returns a rule store backend client based on the provided cfg.
-func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, loader promRules.GroupLoader, logger log.Logger, reg prometheus.Registerer) (rulestore.RuleStore, error) {
- if cfg.Backend == configdb.Name {
- c, err := configClient.New(cfg.ConfigDB)
- if err != nil {
- return nil, err
- }
-
- return configdb.NewConfigRuleStore(c), nil
- }
-
+func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.SSEConfigProvider, loader promRules.GroupLoader, logger log.Logger) (rulestore.RuleStore, error) {
if cfg.Backend == local.Name {
+ if loader == nil {
+ loader = promRules.FileLoader{}
+ }
return local.NewLocalRulesClient(cfg.Local, loader)
}
- bucketClient, err := bucket.NewClient(ctx, cfg.Config, "ruler-storage", logger, reg)
- if err != nil {
- return nil, err
- }
-
- store := bucketclient.NewBucketRuleStore(bucketClient, cfgProvider, logger)
+ bucketClient, err := bucket.NewClient(ctx, cfg.Backend, cfg.Config, "ruler-storage", logger)
if err != nil {
return nil, err
}
- return store, nil
+ return bucketclient.NewBucketRuleStore(bucketClient, cfgProvider, logger), nil
}
diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client.go b/pkg/ruler/rulestore/bucketclient/bucket_client.go
index a39a8b03532da..89ad69f2e3c62 100644
--- a/pkg/ruler/rulestore/bucketclient/bucket_client.go
+++ b/pkg/ruler/rulestore/bucketclient/bucket_client.go
@@ -38,11 +38,11 @@ var (
// using the Thanos objstore.Bucket interface
type BucketRuleStore struct {
bucket objstore.Bucket
- cfgProvider bucket.TenantConfigProvider
+ cfgProvider bucket.SSEConfigProvider
logger log.Logger
}
-func NewBucketRuleStore(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *BucketRuleStore {
+func NewBucketRuleStore(bkt objstore.Bucket, cfgProvider bucket.SSEConfigProvider, logger log.Logger) *BucketRuleStore {
return &BucketRuleStore{
bucket: bucket.NewPrefixedBucketClient(bkt, rulesPrefix),
cfgProvider: cfgProvider,
diff --git a/pkg/ruler/rulestore/config.go b/pkg/ruler/rulestore/config.go
index 1f0602424cdb9..334e43de0917d 100644
--- a/pkg/ruler/rulestore/config.go
+++ b/pkg/ruler/rulestore/config.go
@@ -6,8 +6,6 @@ import (
"github.com/grafana/dskit/flagext"
- "github.com/grafana/loki/v3/pkg/configs/client"
- "github.com/grafana/loki/v3/pkg/ruler/rulestore/configdb"
"github.com/grafana/loki/v3/pkg/ruler/rulestore/local"
"github.com/grafana/loki/v3/pkg/storage/bucket"
)
@@ -15,17 +13,17 @@ import (
// Config configures a rule store.
type Config struct {
bucket.Config `yaml:",inline"`
- ConfigDB client.Config `yaml:"configdb"`
- Local local.Config `yaml:"local"`
+ Backend string `yaml:"backend"`
+ Local local.Config `yaml:"local"`
}
// RegisterFlags registers the backend storage config.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
prefix := "ruler-storage."
- cfg.ExtraBackends = []string{configdb.Name, local.Name}
- cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f)
+ cfg.ExtraBackends = []string{local.Name}
cfg.Local.RegisterFlagsWithPrefix(prefix, f)
+ f.StringVar(&cfg.Backend, prefix+"backend", "filesystem", "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.")
cfg.RegisterFlagsWithPrefix(prefix, f)
}
diff --git a/pkg/ruler/rulestore/configdb/store.go b/pkg/ruler/rulestore/configdb/store.go
deleted file mode 100644
index e4a0526386fe4..0000000000000
--- a/pkg/ruler/rulestore/configdb/store.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package configdb
-
-import (
- "context"
- "errors"
-
- "github.com/grafana/loki/v3/pkg/configs/client"
- "github.com/grafana/loki/v3/pkg/configs/userconfig"
- "github.com/grafana/loki/v3/pkg/ruler/rulespb"
-)
-
-const (
- Name = "configdb"
-)
-
-// ConfigRuleStore is a concrete implementation of RuleStore that sources rules from the config service
-type ConfigRuleStore struct {
- configClient client.Client
- since userconfig.ID
- ruleGroupList map[string]rulespb.RuleGroupList
-}
-
-func (c *ConfigRuleStore) SupportsModifications() bool {
- return false
-}
-
-// NewConfigRuleStore constructs a ConfigRuleStore
-func NewConfigRuleStore(c client.Client) *ConfigRuleStore {
- return &ConfigRuleStore{
- configClient: c,
- since: 0,
- ruleGroupList: make(map[string]rulespb.RuleGroupList),
- }
-}
-
-func (c *ConfigRuleStore) ListAllUsers(ctx context.Context) ([]string, error) {
- m, err := c.ListAllRuleGroups(ctx)
-
- result := make([]string, 0, len(m))
- for u := range m {
- result = append(result, u)
- }
-
- return result, err
-}
-
-// ListAllRuleGroups implements RuleStore
-func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, error) {
- configs, err := c.configClient.GetRules(ctx, c.since)
-
- if err != nil {
- return nil, err
- }
-
- for user, cfg := range configs {
- userRules := rulespb.RuleGroupList{}
- if cfg.IsDeleted() {
- delete(c.ruleGroupList, user)
- continue
- }
- rMap, err := cfg.Config.ParseFormatted()
- if err != nil {
- return nil, err
- }
- for file, rgs := range rMap {
- for _, rg := range rgs.Groups {
- userRules = append(userRules, rulespb.ToProto(user, file, rg))
- }
- }
- c.ruleGroupList[user] = userRules
- }
-
- c.since = getLatestConfigID(configs, c.since)
-
- return c.ruleGroupList, nil
-}
-
-// getLatestConfigID gets the latest configs ID.
-// max [latest, max (map getID cfgs)]
-func getLatestConfigID(cfgs map[string]userconfig.VersionedRulesConfig, latest userconfig.ID) userconfig.ID {
- ret := latest
- for _, config := range cfgs {
- if config.ID > ret {
- ret = config.ID
- }
- }
- return ret
-}
-
-func (c *ConfigRuleStore) ListRuleGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
- r, err := c.ListAllRuleGroups(ctx)
- if err != nil {
- return nil, err
- }
-
- if namespace == "" {
- return r[userID], nil
- }
-
- list := r[userID]
- for ix := 0; ix < len(list); {
- if list[ix].GetNamespace() != namespace {
- list = append(list[:ix], list[ix+1:]...)
- } else {
- ix++
- }
- }
-
- return list, nil
-}
-
-func (c *ConfigRuleStore) LoadRuleGroups(_ context.Context, _ map[string]rulespb.RuleGroupList) error {
- // Since ConfigRuleStore already Loads the rules in the List methods, there is nothing left to do here.
- return nil
-}
-
-// GetRuleGroup is not implemented
-func (c *ConfigRuleStore) GetRuleGroup(_ context.Context, _, _, _ string) (*rulespb.RuleGroupDesc, error) {
- return nil, errors.New("not implemented by the config service rule store")
-}
-
-// SetRuleGroup is not implemented
-func (c *ConfigRuleStore) SetRuleGroup(_ context.Context, _, _ string, _ *rulespb.RuleGroupDesc) error {
- return errors.New("not implemented by the config service rule store")
-}
-
-// DeleteRuleGroup is not implemented
-func (c *ConfigRuleStore) DeleteRuleGroup(_ context.Context, _, _ string, _ string) error {
- return errors.New("not implemented by the config service rule store")
-}
-
-// DeleteNamespace is not implemented
-func (c *ConfigRuleStore) DeleteNamespace(_ context.Context, _, _ string) error {
- return errors.New("not implemented by the config service rule store")
-}
diff --git a/pkg/ruler/rulestore/configdb/store_test.go b/pkg/ruler/rulestore/configdb/store_test.go
deleted file mode 100644
index 4d39581cb6492..0000000000000
--- a/pkg/ruler/rulestore/configdb/store_test.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package configdb
-
-import (
- "context"
- fmt "fmt"
- "testing"
- time "time"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/grafana/loki/v3/pkg/configs/client"
- "github.com/grafana/loki/v3/pkg/configs/userconfig"
-)
-
-var zeroTime time.Time
-
-type MockClient struct {
- cfgs map[string]userconfig.VersionedRulesConfig
- err error
-}
-
-func (c *MockClient) GetRules(_ context.Context, _ userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
- return c.cfgs, c.err
-}
-
-func (c *MockClient) GetAlerts(_ context.Context, _ userconfig.ID) (*client.ConfigsResponse, error) {
- return nil, nil
-}
-
-func Test_ConfigRuleStoreError(t *testing.T) {
- mock := &MockClient{
- cfgs: nil,
- err: fmt.Errorf("Error"),
- }
-
- store := NewConfigRuleStore(mock)
- _, err := store.ListAllRuleGroups(context.Background())
-
- assert.Equal(t, mock.err, err, "Unexpected error returned")
-}
-
-func Test_ConfigRuleStoreReturn(t *testing.T) {
- id := userconfig.ID(10)
- mock := &MockClient{
- cfgs: map[string]userconfig.VersionedRulesConfig{
- "user": {
- ID: id,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- },
- err: nil,
- }
-
- store := NewConfigRuleStore(mock)
- rules, _ := store.ListAllRuleGroups(context.Background())
-
- assert.Equal(t, 1, len(rules["user"]))
- assert.Equal(t, id, store.since)
-}
-
-func Test_ConfigRuleStoreDelete(t *testing.T) {
- mock := &MockClient{
- cfgs: map[string]userconfig.VersionedRulesConfig{
- "user": {
- ID: 1,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- },
- err: nil,
- }
-
- store := NewConfigRuleStore(mock)
- _, _ = store.ListAllRuleGroups(context.Background())
-
- mock.cfgs["user"] = userconfig.VersionedRulesConfig{
- ID: 1,
- Config: userconfig.RulesConfig{},
- DeletedAt: time.Unix(0, 1),
- }
-
- rules, _ := store.ListAllRuleGroups(context.Background())
-
- assert.Equal(t, 0, len(rules["user"]))
-}
-
-func Test_ConfigRuleStoreAppend(t *testing.T) {
- mock := &MockClient{
- cfgs: map[string]userconfig.VersionedRulesConfig{
- "user": {
- ID: 1,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- },
- err: nil,
- }
-
- store := NewConfigRuleStore(mock)
- _, _ = store.ListAllRuleGroups(context.Background())
-
- delete(mock.cfgs, "user")
- mock.cfgs["user2"] = userconfig.VersionedRulesConfig{
- ID: 1,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- }
-
- rules, _ := store.ListAllRuleGroups(context.Background())
-
- assert.Equal(t, 2, len(rules))
-}
-
-func Test_ConfigRuleStoreSinceSet(t *testing.T) {
- mock := &MockClient{
- cfgs: map[string]userconfig.VersionedRulesConfig{
- "user": {
- ID: 1,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- "user1": {
- ID: 10,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- "user2": {
- ID: 100,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- },
- },
- err: nil,
- }
-
- store := NewConfigRuleStore(mock)
- _, _ = store.ListAllRuleGroups(context.Background())
- assert.Equal(t, userconfig.ID(100), store.since)
-
- delete(mock.cfgs, "user")
- delete(mock.cfgs, "user1")
- mock.cfgs["user2"] = userconfig.VersionedRulesConfig{
- ID: 50,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- }
-
- _, _ = store.ListAllRuleGroups(context.Background())
- assert.Equal(t, userconfig.ID(100), store.since)
-
- mock.cfgs["user2"] = userconfig.VersionedRulesConfig{
- ID: 101,
- Config: fakeRuleConfig(),
- DeletedAt: zeroTime,
- }
-
- _, _ = store.ListAllRuleGroups(context.Background())
- assert.Equal(t, userconfig.ID(101), store.since)
-}
-
-func fakeRuleConfig() userconfig.RulesConfig {
- return userconfig.RulesConfig{
- FormatVersion: userconfig.RuleFormatV2,
- Files: map[string]string{
- "test": `
-# Config no. 1.
-groups:
-- name: example
- rules:
- - alert: ScrapeFailed
- expr: 'up != 1'
- for: 10m
- labels:
- severity: warning
- annotations:
- summary: "Scrape of {{$labels.job}} (pod: {{$labels.instance}}) failed."
- description: "Prometheus cannot reach the /metrics page on the {{$labels.instance}} pod."
- impact: "We have no monitoring data for {{$labels.job}} - {{$labels.instance}}. At worst, it's completely down. At best, we cannot reliably respond to operational issues."
- dashboardURL: "$${base_url}/admin/prometheus/targets"
-`,
- },
- }
-}
diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go
index ffc8779328ab3..5d9752f8813ea 100644
--- a/pkg/storage/async_store.go
+++ b/pkg/storage/async_store.go
@@ -372,15 +372,8 @@ func mergeShardsFromIngestersAndStore(
shards := sharding.LinearShards(int(totalBytes/targetBytesPerShard), totalBytes)
- // increment the total chunks by the number seen from ingesters
- // NB(owen-d): this isn't perfect as it mixes signals a bit by joining
- // store chunks which _could_ possibly be filtered with ingester chunks which can't,
- // but it's still directionally helpful
- updatedStats := storeResp.Statistics
- updatedStats.Index.TotalChunks += int64(statsResp.Chunks)
return &logproto.ShardsResponse{
- Shards: shards,
- Statistics: updatedStats,
+ Shards: shards,
// explicitly nil chunkgroups when we've changed the shards+included chunkrefs from ingesters
ChunkGroups: nil,
}
diff --git a/pkg/storage/async_store_test.go b/pkg/storage/async_store_test.go
index 9cf80868c861d..fc0507b73eb2e 100644
--- a/pkg/storage/async_store_test.go
+++ b/pkg/storage/async_store_test.go
@@ -6,15 +6,12 @@ import (
"time"
"github.com/go-kit/log"
-
- "github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/logqlmodel/stats"
-
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/v3/pkg/storage/config"
@@ -374,14 +371,9 @@ func TestMergeShardsFromIngestersAndStore(t *testing.T) {
}
// creates n shards with bytesPerShard * n bytes and chks chunks
- mkShards := func(n int, bytesPerShard uint64, chks int64) logproto.ShardsResponse {
+ mkShards := func(n int, bytesPerShard uint64) logproto.ShardsResponse {
return logproto.ShardsResponse{
Shards: sharding.LinearShards(n, bytesPerShard*uint64(n)),
- Statistics: stats.Result{
- Index: stats.Index{
- TotalChunks: chks,
- },
- },
}
}
@@ -396,32 +388,32 @@ func TestMergeShardsFromIngestersAndStore(t *testing.T) {
{
desc: "zero bytes returns one full shard",
ingester: mkStats(0, 0),
- store: mkShards(0, 0, 0),
- exp: mkShards(1, 0, 0),
+ store: mkShards(0, 0),
+ exp: mkShards(1, 0),
},
{
desc: "zero ingester bytes honors store",
ingester: mkStats(0, 0),
- store: mkShards(10, uint64(targetBytesPerShard), 10),
- exp: mkShards(10, uint64(targetBytesPerShard), 10),
+ store: mkShards(10, uint64(targetBytesPerShard)),
+ exp: mkShards(10, uint64(targetBytesPerShard)),
},
{
desc: "zero store bytes honors ingester",
ingester: mkStats(uint64(targetBytesPerShard*10), 10),
- store: mkShards(0, 0, 0),
- exp: mkShards(10, uint64(targetBytesPerShard), 10),
+ store: mkShards(0, 0),
+ exp: mkShards(10, uint64(targetBytesPerShard)),
},
{
desc: "ingester bytes below threshold ignored",
- ingester: mkStats(uint64(targetBytesPerShard*2), 10), // 2 shards worth from ingesters
- store: mkShards(10, uint64(targetBytesPerShard), 10), // 10 shards worth from store
- exp: mkShards(10, uint64(targetBytesPerShard), 10), // use the store's resp
+ ingester: mkStats(uint64(targetBytesPerShard*2), 10), // 2 shards worth from ingesters
+ store: mkShards(10, uint64(targetBytesPerShard)), // 10 shards worth from store
+ exp: mkShards(10, uint64(targetBytesPerShard)), // use the store's resp
},
{
desc: "ingester bytes above threshold recreate shards",
- ingester: mkStats(uint64(targetBytesPerShard*4), 10), // 4 shards worth from ingesters
- store: mkShards(10, uint64(targetBytesPerShard), 10), // 10 shards worth from store
- exp: mkShards(14, uint64(targetBytesPerShard), 20), // regenerate 14 shards
+ ingester: mkStats(uint64(targetBytesPerShard*4), 10), // 4 shards worth from ingesters
+ store: mkShards(10, uint64(targetBytesPerShard)), // 10 shards worth from store
+ exp: mkShards(14, uint64(targetBytesPerShard)), // regenerate 14 shards
},
} {
@@ -434,7 +426,6 @@ func TestMergeShardsFromIngestersAndStore(t *testing.T) {
)
require.Equal(t, tc.exp.Statistics, got.Statistics)
require.Equal(t, tc.exp.ChunkGroups, got.ChunkGroups)
- require.Equal(t, tc.exp.Statistics.Index.TotalChunks, got.Statistics.Index.TotalChunks)
for i, shard := range tc.exp.Shards {
require.Equal(t, shard, got.Shards[i], "shard %d", i)
}
diff --git a/pkg/storage/bloom/v1/archive_test.go b/pkg/storage/bloom/v1/archive_test.go
index f91039cac3691..b000a09ac9d78 100644
--- a/pkg/storage/bloom/v1/archive_test.go
+++ b/pkg/storage/bloom/v1/archive_test.go
@@ -22,10 +22,7 @@ func TestArchive(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.None,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.None),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
@@ -33,7 +30,7 @@ func TestArchive(t *testing.T) {
)
require.Nil(t, err)
- itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ itr := v2.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
@@ -104,10 +101,7 @@ func TestArchiveCompression(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.None,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.None),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
@@ -115,7 +109,7 @@ func TestArchiveCompression(t *testing.T) {
)
require.Nil(t, err)
- itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ itr := v2.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
diff --git a/pkg/storage/bloom/v1/block.go b/pkg/storage/bloom/v1/block.go
index c309cb7fec29c..7ae55ae222c23 100644
--- a/pkg/storage/bloom/v1/block.go
+++ b/pkg/storage/bloom/v1/block.go
@@ -137,7 +137,7 @@ func (bq *BlockQuerier) Schema() (Schema, error) {
func (bq *BlockQuerier) Reset() error {
bq.blooms.Reset()
- return bq.LazySeriesIter.Seek(0)
+ return bq.Seek(0)
}
func (bq *BlockQuerier) Err() error {
diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go
index a50c2f81e4b8a..483a5425640c8 100644
--- a/pkg/storage/bloom/v1/block_writer.go
+++ b/pkg/storage/bloom/v1/block_writer.go
@@ -14,7 +14,6 @@ import (
)
const (
- FileMode = 0644
BloomFileName = "bloom"
SeriesFileName = "series"
)
diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go
index 82c85bd9f441b..da283794923e4 100644
--- a/pkg/storage/bloom/v1/bloom.go
+++ b/pkg/storage/bloom/v1/bloom.go
@@ -47,18 +47,6 @@ func (b *Bloom) Encode(enc *encoding.Encbuf) error {
return nil
}
-func (b *Bloom) DecodeCopy(dec *encoding.Decbuf) error {
- ln := dec.Uvarint()
- data := dec.Bytes(ln)
-
- _, err := b.ReadFrom(bytes.NewReader(data))
- if err != nil {
- return errors.Wrap(err, "decoding copy of bloom filter")
- }
-
- return nil
-}
-
func (b *Bloom) Decode(dec *encoding.Decbuf) error {
ln := dec.Uvarint()
data := dec.Bytes(ln)
diff --git a/pkg/storage/bloom/v1/bloom_builder.go b/pkg/storage/bloom/v1/bloom_builder.go
index ea54ba248f7c4..c327f5d6bfd95 100644
--- a/pkg/storage/bloom/v1/bloom_builder.go
+++ b/pkg/storage/bloom/v1/bloom_builder.go
@@ -28,20 +28,13 @@ func NewBloomBlockBuilder(opts BlockOptions, writer io.WriteCloser) *BloomBlockB
}
}
-func (b *BloomBlockBuilder) WriteSchema() error {
- b.scratch.Reset()
- b.opts.Schema.Encode(b.scratch)
- if _, err := b.writer.Write(b.scratch.Get()); err != nil {
- return errors.Wrap(err, "writing schema")
- }
- b.writtenSchema = true
- b.offset += b.scratch.Len()
- return nil
+func (b *BloomBlockBuilder) UnflushedSize() int {
+ return b.scratch.Len() + b.page.UnflushedSize()
}
func (b *BloomBlockBuilder) Append(bloom *Bloom) (BloomOffset, error) {
if !b.writtenSchema {
- if err := b.WriteSchema(); err != nil {
+ if err := b.writeSchema(); err != nil {
return BloomOffset{}, errors.Wrap(err, "writing schema")
}
}
@@ -63,7 +56,30 @@ func (b *BloomBlockBuilder) Append(bloom *Bloom) (BloomOffset, error) {
}, nil
}
+func (b *BloomBlockBuilder) writeSchema() error {
+ if b.writtenSchema {
+ return nil
+ }
+
+ b.scratch.Reset()
+ b.opts.Schema.Encode(b.scratch)
+ if _, err := b.writer.Write(b.scratch.Get()); err != nil {
+ return errors.Wrap(err, "writing schema")
+ }
+ b.writtenSchema = true
+ b.offset += b.scratch.Len()
+ return nil
+}
+
func (b *BloomBlockBuilder) Close() (uint32, error) {
+ if !b.writtenSchema {
+ // We will get here only if we haven't appended any bloom filters to the block
+ // This would happen only if all series yielded empty blooms
+ if err := b.writeSchema(); err != nil {
+ return 0, errors.Wrap(err, "writing schema")
+ }
+ }
+
if b.page.Count() > 0 {
if err := b.flushPage(); err != nil {
return 0, errors.Wrap(err, "flushing final bloom page")
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
index 939c91c214398..2cf75b9fbd12a 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -126,12 +126,12 @@ func (bt *BloomTokenizer) Populate(blooms v2iter.SizedIterator[*Bloom], chks v2i
}
func (bt *BloomTokenizer) sendBloom(ch chan<- *BloomCreation, bloom *Bloom, info indexingInfo) {
- fillRatio := bloom.ScalableBloomFilter.FillRatio()
+ fillRatio := bloom.FillRatio()
bt.metrics.hammingWeightRatio.Observe(fillRatio)
bt.metrics.estimatedCount.Observe(
- float64(estimatedCount(bloom.ScalableBloomFilter.Capacity(), fillRatio)),
+ float64(estimatedCount(bloom.Capacity(), fillRatio)),
)
- bt.metrics.bloomSize.Observe(float64(bloom.ScalableBloomFilter.Capacity() / eightBits))
+ bt.metrics.bloomSize.Observe(float64(bloom.Capacity() / eightBits))
bt.metrics.bloomsTotal.Inc()
ch <- &BloomCreation{
Bloom: bloom,
@@ -184,7 +184,7 @@ func (bt *BloomTokenizer) addChunkToBloom(bloom *Bloom, ref ChunkRef, entryIter
}
// maxBloomSize is in bytes, but blooms operate at the bit level; adjust
- collision, full = bloom.ScalableBloomFilter.TestAndAddWithMaxSize([]byte(tok), bt.maxBloomSize*eightBits)
+ collision, full = bloom.TestAndAddWithMaxSize([]byte(tok), bt.maxBloomSize*eightBits)
if collision {
collisionInserts++
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
index f4c7ec7d831c4..ec5dd475481d4 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
@@ -173,7 +173,7 @@ func TestTokenizerPopulateWontExceedMaxSize(t *testing.T) {
var ct int
for created := range ch {
ct++
- capacity := created.Bloom.ScalableBloomFilter.Capacity() / 8
+ capacity := created.Bloom.Capacity() / 8
t.Log(ct, int(capacity), maxSize)
require.Less(t, int(capacity), maxSize)
}
diff --git a/pkg/storage/bloom/v1/bounds.go b/pkg/storage/bloom/v1/bounds.go
index d33a3dec6aa63..aebcece85138f 100644
--- a/pkg/storage/bloom/v1/bounds.go
+++ b/pkg/storage/bloom/v1/bounds.go
@@ -5,11 +5,9 @@ import (
"hash"
"math"
"strings"
- "unsafe"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "golang.org/x/exp/slices"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/logproto"
@@ -41,13 +39,6 @@ func BoundsFromProto(pb logproto.FPBounds) FingerprintBounds {
return FingerprintBounds(pb)
}
-// Unsafe cast to avoid allocation. This _requires_ that the underlying types are the same
-// which is checked by the compiler above
-func MultiBoundsFromProto(pb []logproto.FPBounds) MultiFingerprintBounds {
- //nolint:unconvert
- return MultiFingerprintBounds(*(*MultiFingerprintBounds)(unsafe.Pointer(&pb)))
-}
-
// ParseBoundsFromAddr parses a fingerprint bounds from a string
func ParseBoundsFromAddr(s string) (FingerprintBounds, error) {
parts := strings.Split(s, "-")
@@ -207,40 +198,6 @@ func (b FingerprintBounds) Range() uint64 {
return uint64(b.Max - b.Min)
}
-type MultiFingerprintBounds []FingerprintBounds
-
-func (mb MultiFingerprintBounds) Union(target FingerprintBounds) MultiFingerprintBounds {
- if len(mb) == 0 {
- return MultiFingerprintBounds{target}
- }
- if len(mb) == 1 {
- return mb[0].Union(target)
- }
-
- mb = append(mb, target)
- slices.SortFunc(mb, func(a, b FingerprintBounds) int {
- if a.Less(b) {
- return -1
- } else if a.Equal(b) {
- return 0
- }
- return 1
- })
-
- var union MultiFingerprintBounds
- for i := 0; i < len(mb); i++ {
- j := len(union) - 1 // index of last item of union
- if j >= 0 && union[j].Max >= mb[i].Min-1 {
- union[j] = NewBounds(union[j].Min, max(mb[i].Max, union[j].Max))
- } else {
- union = append(union, mb[i])
- }
- }
-
- mb = union
- return mb
-}
-
// unused, but illustrative
type BoundedIter[V any] struct {
iter.Iterator[V]
@@ -249,7 +206,7 @@ type BoundedIter[V any] struct {
func (bi *BoundedIter[V]) Next() bool {
for bi.Iterator.Next() {
- switch bi.cmp(bi.Iterator.At()) {
+ switch bi.cmp(bi.At()) {
case Before:
continue
case After:
diff --git a/pkg/storage/bloom/v1/bounds_test.go b/pkg/storage/bloom/v1/bounds_test.go
index 5baaf07e900df..e6f64827e10d8 100644
--- a/pkg/storage/bloom/v1/bounds_test.go
+++ b/pkg/storage/bloom/v1/bounds_test.go
@@ -17,17 +17,6 @@ func TestBoundsFromProto(t *testing.T) {
assert.Equal(t, NewBounds(10, 2000), bounds)
}
-func TestMultiBoundsFromProto(t *testing.T) {
- bounds := MultiBoundsFromProto([]logproto.FPBounds{
- {Min: 10, Max: 2000},
- {Min: 2001, Max: 4000},
- })
- assert.Equal(t, MultiFingerprintBounds{
- NewBounds(10, 2000),
- NewBounds(2001, 4000),
- }, bounds)
-}
-
func Test_ParseFingerprint(t *testing.T) {
t.Parallel()
fp, err := model.ParseFingerprint("7d0")
@@ -150,132 +139,3 @@ func Test_FingerprintBounds_Unless(t *testing.T) {
}, NewBounds(5, 25).Unless(target))
assert.Nil(t, NewBounds(14, 15).Unless(target))
}
-
-func Test_MultiFingerprintBounds(t *testing.T) {
- for _, tc := range []struct {
- desc string
- mb MultiFingerprintBounds
- target FingerprintBounds
- exp MultiFingerprintBounds
- }{
- {
- desc: "no elements",
- mb: MultiFingerprintBounds{},
- target: NewBounds(0, 9),
- exp: MultiFingerprintBounds{
- NewBounds(0, 9),
- },
- },
- {
- desc: "single element before",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- },
- target: NewBounds(15, 19),
- exp: MultiFingerprintBounds{
- NewBounds(5, 9),
- NewBounds(15, 19),
- },
- },
- {
- desc: "single element after",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- },
- target: NewBounds(0, 3),
- exp: MultiFingerprintBounds{
- NewBounds(0, 3),
- NewBounds(5, 9),
- },
- },
- {
- desc: "single element overlapping",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- },
- target: NewBounds(0, 14),
- exp: MultiFingerprintBounds{
- NewBounds(0, 14),
- },
- },
- {
- desc: "multiple elements single overlapping",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- NewBounds(15, 19),
- },
- target: NewBounds(0, 6),
- exp: MultiFingerprintBounds{
- NewBounds(0, 9),
- NewBounds(15, 19),
- },
- },
- {
- desc: "multiple elements single overlapping",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- NewBounds(15, 19),
- },
- target: NewBounds(11, 25),
- exp: MultiFingerprintBounds{
- NewBounds(5, 9),
- NewBounds(11, 25),
- },
- },
- {
- desc: "multiple elements combining overlapping",
- mb: MultiFingerprintBounds{
- NewBounds(5, 9),
- NewBounds(15, 19),
- },
- target: NewBounds(9, 15),
- exp: MultiFingerprintBounds{
- NewBounds(5, 19),
- },
- },
- {
- desc: "combination",
- mb: MultiFingerprintBounds{
- NewBounds(0, 2),
- NewBounds(5, 9),
- NewBounds(15, 19),
- NewBounds(25, 29),
- },
- target: NewBounds(9, 15),
- exp: MultiFingerprintBounds{
- NewBounds(0, 2),
- NewBounds(5, 19),
- NewBounds(25, 29),
- },
- },
- {
- desc: "overlapping ranges",
- mb: MultiFingerprintBounds{
- NewBounds(0, 6),
- NewBounds(5, 15),
- },
- target: NewBounds(8, 10),
- exp: MultiFingerprintBounds{
- NewBounds(0, 15),
- },
- },
- {
- desc: "disjoint ranges and target is between",
- mb: MultiFingerprintBounds{
- NewBounds(0, 9),
- NewBounds(30, 39),
- },
- target: NewBounds(15, 19),
- exp: MultiFingerprintBounds{
- NewBounds(0, 9),
- NewBounds(15, 19),
- NewBounds(30, 39),
- },
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- res := tc.mb.Union(tc.target)
- assert.Equal(t, tc.exp, res)
- })
- }
-}
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index 0d291b5a9ea28..466687aa44b9a 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -5,6 +5,8 @@ import (
"hash"
"io"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/grafana/loki/v3/pkg/compression"
@@ -67,10 +69,8 @@ func (b BlockOptions) Encode(enc *encoding.Encbuf) {
}
func NewBlockOptions(enc compression.Codec, maxBlockSizeBytes, maxBloomSizeBytes uint64) BlockOptions {
- opts := NewBlockOptionsFromSchema(Schema{
- version: CurrentSchemaVersion,
- encoding: enc,
- }, maxBloomSizeBytes)
+ schema := NewSchema(CurrentSchemaVersion, enc)
+ opts := NewBlockOptionsFromSchema(schema, maxBloomSizeBytes)
opts.BlockSize = maxBlockSizeBytes
opts.UnencodedBlockOptions.MaxBloomSizeBytes = maxBloomSizeBytes
return opts
@@ -114,6 +114,10 @@ func (w *PageWriter) Reset() {
w.n = 0
}
+func (w *PageWriter) UnflushedSize() int {
+ return w.enc.Len()
+}
+
func (w *PageWriter) SpaceFor(numBytes int) bool {
// if a single bloom exceeds the target size, still accept it
// otherwise only accept it if adding it would not exceed the target size
@@ -191,6 +195,7 @@ type MergeBuilder struct {
// Add chunks of a single series to a bloom
populate BloomPopulatorFunc
metrics *Metrics
+ logger log.Logger
}
type BloomPopulatorFunc func(series *Series, preExistingBlooms iter.SizedIterator[*Bloom], chunksToAdd ChunkRefs, ch chan *BloomCreation)
@@ -204,12 +209,13 @@ func NewMergeBuilder(
store iter.Iterator[*Series],
populate BloomPopulatorFunc,
metrics *Metrics,
+ logger log.Logger,
) *MergeBuilder {
// combinedSeriesIter handles series with fingerprint collisions:
// because blooms dont contain the label-set (only the fingerprint),
// in the case of a fingerprint collision we simply treat it as one
// series with multiple chunks.
- combinedSeriesIter := iter.NewDedupingIter[*Series, *Series](
+ combinedSeriesIter := iter.NewDedupingIter(
// eq
func(s1, s2 *Series) bool {
return s1.Fingerprint == s2.Fingerprint
@@ -223,7 +229,7 @@ func NewMergeBuilder(
Chunks: s1.Chunks.Union(s2.Chunks),
}
},
- iter.NewPeekIter[*Series](store),
+ iter.NewPeekIter(store),
)
return &MergeBuilder{
@@ -231,6 +237,7 @@ func NewMergeBuilder(
store: combinedSeriesIter,
populate: populate,
metrics: metrics,
+ logger: logger,
}
}
@@ -295,7 +302,7 @@ func (mb *MergeBuilder) processNextSeries(
chunksCopied += len(nextInStore.Chunks) - len(chunksToAdd)
preExistingBlooms = nextInBlocks.Blooms
// we also need to carry over existing indexed fields from the series metadata
- info.indexedFields.Union(nextInBlocks.Series.Meta.Fields)
+ info.indexedFields.Union(nextInBlocks.Series.Fields)
}
chunksIndexed += len(chunksToAdd)
@@ -308,6 +315,12 @@ func (mb *MergeBuilder) processNextSeries(
if creation.Err != nil {
return nil, info.sourceBytes, 0, false, false, errors.Wrap(creation.Err, "populating bloom")
}
+
+ if creation.Bloom.IsEmpty() {
+ level.Debug(mb.logger).Log("msg", "received empty bloom. Adding to index but skipping offsets", "fingerprint", nextInStore.Fingerprint)
+ continue
+ }
+
offset, err := builder.AddBloom(creation.Bloom)
if err != nil {
return nil, info.sourceBytes, 0, false, false, errors.Wrapf(
diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
index 662c1375809be..fa8ccbc87a3d9 100644
--- a/pkg/storage/bloom/v1/builder_test.go
+++ b/pkg/storage/bloom/v1/builder_test.go
@@ -6,6 +6,7 @@ import (
"sort"
"testing"
+ "github.com/go-kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -42,10 +43,7 @@ func TestBlockOptions_BloomPageSize(t *testing.T) {
func TestBlockOptions_RoundTrip(t *testing.T) {
t.Parallel()
opts := BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
BlockSize: 10 << 20,
@@ -100,10 +98,7 @@ func TestBlockBuilder_RoundTrip(t *testing.T) {
desc := fmt.Sprintf("%s/%s", tc.desc, enc)
t.Run(desc, func(t *testing.T) {
blockOpts := BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: enc,
- },
+ Schema: NewSchema(CurrentSchemaVersion, enc),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
BlockSize: tc.maxBlockSize,
@@ -192,7 +187,7 @@ func TestBlockBuilder_RoundTrip(t *testing.T) {
func dedupedBlocks(blocks []iter.PeekIterator[*SeriesWithBlooms]) iter.Iterator[*SeriesWithBlooms] {
orderedBlocks := NewHeapIterForSeriesWithBloom(blocks...)
- return iter.NewDedupingIter[*SeriesWithBlooms](
+ return iter.NewDedupingIter(
func(a *SeriesWithBlooms, b *SeriesWithBlooms) bool {
return a.Series.Fingerprint == b.Series.Fingerprint
},
@@ -203,7 +198,7 @@ func dedupedBlocks(blocks []iter.PeekIterator[*SeriesWithBlooms]) iter.Iterator[
}
return b
},
- iter.NewPeekIter[*SeriesWithBlooms](orderedBlocks),
+ iter.NewPeekIter(orderedBlocks),
)
}
@@ -215,10 +210,7 @@ func TestMergeBuilder(t *testing.T) {
blocks := make([]iter.PeekIterator[*SeriesWithBlooms], 0, nBlocks)
data, _ := MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000)
blockOpts := BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
}
@@ -244,10 +236,10 @@ func TestMergeBuilder(t *testing.T) {
)
require.Nil(t, err)
- itr := iter.NewSliceIter[SeriesWithBlooms](data[min:max])
+ itr := iter.NewSliceIter(data[min:max])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
- blocks = append(blocks, iter.NewPeekIter[*SeriesWithBlooms](NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter()))
+ blocks = append(blocks, iter.NewPeekIter(NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter()))
}
// We're not testing the ability to extend a bloom in this test
@@ -264,15 +256,15 @@ func TestMergeBuilder(t *testing.T) {
// storage should contain references to all the series we ingested,
// regardless of block allocation/overlap.
- storeItr := iter.NewMapIter[SeriesWithBlooms, *Series](
- iter.NewSliceIter[SeriesWithBlooms](data),
+ storeItr := iter.NewMapIter(
+ iter.NewSliceIter(data),
func(swb SeriesWithBlooms) *Series {
return &swb.Series.Series
},
)
// Ensure that the merge builder combines all the blocks correctly
- mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, populate, NewMetrics(nil))
+ mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, populate, NewMetrics(nil), log.NewNopLogger())
indexBuf := bytes.NewBuffer(nil)
bloomsBuf := bytes.NewBuffer(nil)
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
@@ -287,16 +279,16 @@ func TestMergeBuilder(t *testing.T) {
block := NewBlock(reader, NewMetrics(nil))
querier := NewBlockQuerier(block, &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize)
- EqualIterators[*SeriesWithBlooms](
+ EqualIterators(
t,
func(a, b *SeriesWithBlooms) {
require.Equal(t, a.Series.Series, b.Series.Series, "expected series %+v, got %+v", a.Series.Series, b.Series.Series)
- require.Equal(t, a.Series.Meta.Fields, b.Series.Meta.Fields, "expected fields %+v, got %+v", a.Series.Meta.Fields, b.Series.Meta.Fields)
+ require.Equal(t, a.Series.Fields, b.Series.Fields, "expected fields %+v, got %+v", a.Series.Fields, b.Series.Fields)
// TODO(chaudum): Investigate why offsets not match
// This has not been tested before, so I'm not too worried about something being broken.
// require.Equal(t, a.Series.Meta.Offsets, b.Series.Meta.Offsets, "expected offsets %+v, got %+v", a.Series.Meta.Offsets, b.Series.Meta.Offsets)
},
- iter.NewSliceIter[*SeriesWithBlooms](PointerSlice(data)),
+ iter.NewSliceIter(PointerSlice(data)),
querier.Iter(),
)
}
@@ -312,10 +304,7 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) {
reader := NewByteReader(indexBuf, bloomsBuf)
blockOpts := BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
}
@@ -362,6 +351,8 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) {
// We're not testing the ability to extend a bloom in this test
pop := func(_ *Series, _ iter.SizedIterator[*Bloom], _ ChunkRefs, ch chan *BloomCreation) {
bloom := NewBloom()
+ // Add something to the bloom so it's not empty
+ bloom.Add([]byte("hello"))
stats := indexingInfo{
sourceBytes: int(bloom.Capacity()) / 8,
indexedFields: NewSetFromLiteral[Field]("__all__"),
@@ -379,6 +370,7 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) {
iter.NewSliceIter(data),
pop,
NewMetrics(nil),
+ log.NewNopLogger(),
)
_, _, err = mergeBuilder.Build(builder)
@@ -409,10 +401,7 @@ func TestBlockReset(t *testing.T) {
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
reader := NewByteReader(indexBuf, bloomsBuf)
- schema := Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- }
+ schema := NewSchema(CurrentSchemaVersion, compression.Snappy)
builder, err := NewBlockBuilder(
BlockOptions{
@@ -424,7 +413,7 @@ func TestBlockReset(t *testing.T) {
)
require.Nil(t, err)
- itr := iter.NewSliceIter[SeriesWithBlooms](data)
+ itr := iter.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
block := NewBlock(reader, NewMetrics(nil))
@@ -434,7 +423,7 @@ func TestBlockReset(t *testing.T) {
for i := 0; i < len(rounds); i++ {
for querier.Next() {
- rounds[i] = append(rounds[i], querier.At().Series.Fingerprint)
+ rounds[i] = append(rounds[i], querier.At().Fingerprint)
}
err = querier.Seek(0) // reset at end
@@ -465,10 +454,9 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
}
blockOpts := BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy, // test with different encodings?
- },
+ // test with different encodings?
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
+
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
}
@@ -487,7 +475,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
require.Nil(t, err)
// each set of copies gets a different slice of the data
minIdx, maxIdx := i*len(xs)/len(sets), (i+1)*len(xs)/len(sets)
- itr := iter.NewSliceIter[SeriesWithBlooms](xs[minIdx:maxIdx])
+ itr := iter.NewSliceIter(xs[minIdx:maxIdx])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
block := NewBlock(reader, NewMetrics(nil))
@@ -509,12 +497,12 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
var store []iter.PeekIterator[*SeriesWithBlooms]
for _, x := range data {
- blocks = append(blocks, iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](x)))
- store = append(store, iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](x)))
+ blocks = append(blocks, iter.NewPeekIter(iter.NewSliceIter(x)))
+ store = append(store, iter.NewPeekIter(iter.NewSliceIter(x)))
}
orderedStore := NewHeapIterForSeriesWithBloom(store...)
- dedupedStore := iter.NewDedupingIter[*SeriesWithBlooms, *Series](
+ dedupedStore := iter.NewDedupingIter(
func(a *SeriesWithBlooms, b *Series) bool {
return a.Series.Fingerprint == b.Fingerprint
},
@@ -527,7 +515,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
}
return b
},
- iter.NewPeekIter[*SeriesWithBlooms](orderedStore),
+ iter.NewPeekIter(orderedStore),
)
// We're not testing the ability to extend a bloom in this test
@@ -555,6 +543,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
dedupedStore,
pop,
NewMetrics(nil),
+ log.NewNopLogger(),
)
builder, err := NewBlockBuilder(blockOpts, writer)
require.Nil(t, err)
@@ -568,9 +557,9 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
// ensure the new block contains one copy of all the data
// by comparing it against an iterator over the source data
mergedBlockQuerier := NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize)
- sourceItr := iter.NewSliceIter[*SeriesWithBlooms](PointerSlice[SeriesWithBlooms](xs))
+ sourceItr := iter.NewSliceIter(PointerSlice(xs))
- EqualIterators[*SeriesWithBlooms](
+ EqualIterators(
t,
func(a, b *SeriesWithBlooms) {
require.Equal(t, a.Series.Fingerprint, b.Series.Fingerprint)
diff --git a/pkg/storage/bloom/v1/dedupe_test.go b/pkg/storage/bloom/v1/dedupe_test.go
index 8c4dd43629b5a..a75c8064030d8 100644
--- a/pkg/storage/bloom/v1/dedupe_test.go
+++ b/pkg/storage/bloom/v1/dedupe_test.go
@@ -18,7 +18,7 @@ func TestMergeDedupeIter(t *testing.T) {
)
for i := 0; i < len(queriers); i++ {
- queriers[i] = iter.NewPeekIter[*SeriesWithBlooms](iter.NewSliceIter[*SeriesWithBlooms](dataPtr))
+ queriers[i] = iter.NewPeekIter(iter.NewSliceIter(dataPtr))
}
mbq := NewHeapIterForSeriesWithBloom(queriers...)
@@ -28,11 +28,11 @@ func TestMergeDedupeIter(t *testing.T) {
merge := func(a, _ *SeriesWithBlooms) *SeriesWithBlooms {
return a
}
- deduper := iter.NewDedupingIter[*SeriesWithBlooms, *SeriesWithBlooms](
+ deduper := iter.NewDedupingIter(
eq,
iter.Identity[*SeriesWithBlooms],
merge,
- iter.NewPeekIter[*SeriesWithBlooms](mbq),
+ iter.NewPeekIter(mbq),
)
for i := 0; i < len(data); i++ {
diff --git a/pkg/storage/bloom/v1/filter/buckets.go b/pkg/storage/bloom/v1/filter/buckets.go
index 95c55f394e1fa..89836ba441610 100644
--- a/pkg/storage/bloom/v1/filter/buckets.go
+++ b/pkg/storage/bloom/v1/filter/buckets.go
@@ -17,10 +17,6 @@ import (
"math/bits"
)
-type BucketGetter interface {
- Get(bucket uint) uint32
-}
-
// Buckets is a fast, space-efficient array of buckets where each bucket can
// store up to a configured maximum value.
type Buckets struct {
diff --git a/pkg/storage/bloom/v1/filter/scalable.go b/pkg/storage/bloom/v1/filter/scalable.go
index 3e59a99b011f9..bc3b84a271a83 100644
--- a/pkg/storage/bloom/v1/filter/scalable.go
+++ b/pkg/storage/bloom/v1/filter/scalable.go
@@ -71,9 +71,7 @@ type ScalableBloomFilter struct {
const fillCheckFraction = 100
// NewScalableBloomFilter creates a new Scalable Bloom Filter with the
-// specified target false-positive rate and tightening ratio. Use
-// NewDefaultScalableBloomFilter if you don't want to calculate these
-// parameters.
+// specified target false-positive rate and tightening ratio.
func NewScalableBloomFilter(hint uint, fpRate, r float64) *ScalableBloomFilter {
s := &ScalableBloomFilter{
filters: make([]*PartitionedBloomFilter, 0, 1),
@@ -88,11 +86,6 @@ func NewScalableBloomFilter(hint uint, fpRate, r float64) *ScalableBloomFilter {
return s
}
-// NewDefaultScalableBloomFilter creates a new Scalable Bloom Filter.
-func NewDefaultScalableBloomFilter() *ScalableBloomFilter {
- return NewScalableBloomFilter(10e3, 0.1, 0.8)
-}
-
// Capacity returns the current Scalable Bloom Filter capacity, which is the
// sum of the capacities for the contained series of Bloom filters.
func (s *ScalableBloomFilter) Capacity() uint {
@@ -210,14 +203,6 @@ func (s *ScalableBloomFilter) TestAndAddWithMaxSize(data []byte, maxSize int) (e
return member, full
}
-// Reset restores the Bloom filter to its original state. It returns the filter
-// to allow for chaining.
-func (s *ScalableBloomFilter) Reset() *ScalableBloomFilter {
- s.filters = make([]*PartitionedBloomFilter, 0, 1)
- s.addFilter()
- return s
-}
-
func (s *ScalableBloomFilter) nextFilterCapacity() (m uint, fpRate float64) {
fpRate = s.fp * math.Pow(s.r, float64(len(s.filters)))
diff --git a/pkg/storage/bloom/v1/filter/scalable_test.go b/pkg/storage/bloom/v1/filter/scalable_test.go
index 2456277f2e93a..bf46bfba4ec16 100644
--- a/pkg/storage/bloom/v1/filter/scalable_test.go
+++ b/pkg/storage/bloom/v1/filter/scalable_test.go
@@ -17,24 +17,6 @@ import (
"github.com/d4l3k/messagediff"
)
-// Ensures that NewDefaultScalableBloomFilter creates a Scalable Bloom Filter
-// with hint = 10000 and r = 0.8.
-func TestNewDefaultScalableBloomFilter(t *testing.T) {
- f := NewDefaultScalableBloomFilter()
-
- if f.fp != 0.1 {
- t.Errorf("Expected 0.1, got %f", f.fp)
- }
-
- if f.hint != 10000 {
- t.Errorf("Expected 10000, got %d", f.hint)
- }
-
- if f.r != 0.8 {
- t.Errorf("Expected 0.8, got %f", f.r)
- }
-}
-
// Ensures that K returns the number of hash functions used in each Bloom
// filter.
func TestScalableBloomK(t *testing.T) {
@@ -111,34 +93,6 @@ func TestScalableBloomTestAndAdd(t *testing.T) {
}
}
-// Ensures that Reset removes all Bloom filters and resets the initial one.
-func TestScalableBloomReset(t *testing.T) {
- f := NewScalableBloomFilter(10, 0.1, 0.8)
- for i := 0; i < 1000; i++ {
- f.Add([]byte(strconv.Itoa(i)))
- }
-
- if len(f.filters) < 2 {
- t.Errorf("Expected more than 1 filter, got %d", len(f.filters))
- }
-
- if f.Reset() != f {
- t.Error("Returned ScalableBloomFilter should be the same instance")
- }
-
- if len(f.filters) != 1 {
- t.Errorf("Expected 1 filter, got %d", len(f.filters))
- }
-
- for _, partition := range f.filters[0].partitions {
- for i := uint(0); i < partition.Count(); i++ {
- if partition.Get(i) != 0 {
- t.Error("Expected all bits to be unset")
- }
- }
- }
-}
-
// Ensures that ScalableBloomFilter can be serialized and deserialized without errors.
func TestScalableBloomGob(t *testing.T) {
f := NewScalableBloomFilter(10, 0.1, 0.8)
diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go
index f579ce6527849..b25743ea4c541 100644
--- a/pkg/storage/bloom/v1/fuse.go
+++ b/pkg/storage/bloom/v1/fuse.go
@@ -32,6 +32,8 @@ func NewBloomRecorder(ctx context.Context, id string) *BloomRecorder {
chunksSkipped: atomic.NewInt64(0),
seriesMissed: atomic.NewInt64(0),
chunksMissed: atomic.NewInt64(0),
+ seriesEmpty: atomic.NewInt64(0),
+ chunksEmpty: atomic.NewInt64(0),
chunksFiltered: atomic.NewInt64(0),
}
}
@@ -45,6 +47,8 @@ type BloomRecorder struct {
seriesSkipped, chunksSkipped *atomic.Int64
// not found in bloom
seriesMissed, chunksMissed *atomic.Int64
+ // exists in block index but empty offsets
+ seriesEmpty, chunksEmpty *atomic.Int64
// filtered out
chunksFiltered *atomic.Int64
}
@@ -56,6 +60,8 @@ func (r *BloomRecorder) Merge(other *BloomRecorder) {
r.chunksSkipped.Add(other.chunksSkipped.Load())
r.seriesMissed.Add(other.seriesMissed.Load())
r.chunksMissed.Add(other.chunksMissed.Load())
+ r.seriesEmpty.Add(other.seriesEmpty.Load())
+ r.chunksEmpty.Add(other.chunksEmpty.Load())
r.chunksFiltered.Add(other.chunksFiltered.Load())
}
@@ -66,13 +72,15 @@ func (r *BloomRecorder) Report(logger log.Logger, metrics *Metrics) {
seriesFound = r.seriesFound.Load()
seriesSkipped = r.seriesSkipped.Load()
seriesMissed = r.seriesMissed.Load()
- seriesRequested = seriesFound + seriesSkipped + seriesMissed
+ seriesEmpty = r.seriesEmpty.Load()
+ seriesRequested = seriesFound + seriesSkipped + seriesMissed + seriesEmpty
chunksFound = r.chunksFound.Load()
chunksSkipped = r.chunksSkipped.Load()
chunksMissed = r.chunksMissed.Load()
chunksFiltered = r.chunksFiltered.Load()
- chunksRequested = chunksFound + chunksSkipped + chunksMissed
+ chunksEmpty = r.chunksEmpty.Load()
+ chunksRequested = chunksFound + chunksSkipped + chunksMissed + chunksEmpty
)
level.Debug(logger).Log(
"recorder_msg", "bloom search results",
@@ -82,11 +90,13 @@ func (r *BloomRecorder) Report(logger log.Logger, metrics *Metrics) {
"recorder_series_found", seriesFound,
"recorder_series_skipped", seriesSkipped,
"recorder_series_missed", seriesMissed,
+ "recorder_series_empty", seriesEmpty,
"recorder_chunks_requested", chunksRequested,
"recorder_chunks_found", chunksFound,
"recorder_chunks_skipped", chunksSkipped,
"recorder_chunks_missed", chunksMissed,
+ "recorder_chunks_empty", chunksEmpty,
"recorder_chunks_filtered", chunksFiltered,
)
@@ -94,25 +104,27 @@ func (r *BloomRecorder) Report(logger log.Logger, metrics *Metrics) {
metrics.recorderSeries.WithLabelValues(recorderRequested).Add(float64(seriesRequested))
metrics.recorderSeries.WithLabelValues(recorderFound).Add(float64(seriesFound))
metrics.recorderSeries.WithLabelValues(recorderSkipped).Add(float64(seriesSkipped))
+ metrics.recorderSeries.WithLabelValues(recorderEmpty).Add(float64(seriesEmpty))
metrics.recorderSeries.WithLabelValues(recorderMissed).Add(float64(seriesMissed))
metrics.recorderChunks.WithLabelValues(recorderRequested).Add(float64(chunksRequested))
metrics.recorderChunks.WithLabelValues(recorderFound).Add(float64(chunksFound))
metrics.recorderChunks.WithLabelValues(recorderSkipped).Add(float64(chunksSkipped))
metrics.recorderChunks.WithLabelValues(recorderMissed).Add(float64(chunksMissed))
+ metrics.recorderChunks.WithLabelValues(recorderEmpty).Add(float64(chunksEmpty))
metrics.recorderChunks.WithLabelValues(recorderFiltered).Add(float64(chunksFiltered))
}
}
-func (r *BloomRecorder) record(
- seriesFound, chunksFound, seriesSkipped, chunksSkipped, seriesMissed, chunksMissed, chunksFiltered int,
-) {
+func (r *BloomRecorder) record(seriesFound, chunksFound, seriesSkipped, chunksSkipped, seriesMissed, chunksMissed, seriesEmpty, chunksEmpty, chunksFiltered int) {
r.seriesFound.Add(int64(seriesFound))
r.chunksFound.Add(int64(chunksFound))
r.seriesSkipped.Add(int64(seriesSkipped))
r.chunksSkipped.Add(int64(chunksSkipped))
r.seriesMissed.Add(int64(seriesMissed))
r.chunksMissed.Add(int64(chunksMissed))
+ r.seriesEmpty.Add(int64(seriesEmpty))
+ r.chunksEmpty.Add(int64(chunksEmpty))
r.chunksFiltered.Add(int64(chunksFiltered))
}
@@ -137,14 +149,14 @@ type FusedQuerier struct {
}
func NewFusedQuerier(bq *BlockQuerier, inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier {
- heap := NewHeapIterator[Request](
+ heap := NewHeapIterator(
func(a, b Request) bool {
return a.Fp < b.Fp
},
inputs...,
)
- merging := iter.NewDedupingIter[Request, []Request](
+ merging := iter.NewDedupingIter(
func(a Request, b []Request) bool {
return a.Fp == b[0].Fp
},
@@ -152,7 +164,7 @@ func NewFusedQuerier(bq *BlockQuerier, inputs []iter.PeekIterator[Request], logg
func(a Request, b []Request) []Request {
return append(b, a)
},
- iter.NewPeekIter[Request](heap),
+ iter.NewPeekIter(heap),
)
return &FusedQuerier{
bq: bq,
@@ -170,6 +182,7 @@ func (fq *FusedQuerier) recordMissingFp(
0, 0, // found
0, 0, // skipped
1, len(input.Chks), // missed
+ 0, 0, // empty
0, // chunks filtered
)
})
@@ -184,6 +197,22 @@ func (fq *FusedQuerier) recordSkippedFp(
0, 0, // found
1, len(input.Chks), // skipped
0, 0, // missed
+ 0, 0, // empty
+ 0, // chunks filtered
+ )
+ })
+}
+
+func (fq *FusedQuerier) recordEmptyFp(
+ batch []Request,
+ fp model.Fingerprint,
+) {
+ fq.noRemovals(batch, fp, func(input Request) {
+ input.Recorder.record(
+ 0, 0, // found
+ 0, 0, // skipped
+ 0, 0, // missed
+ 1, len(input.Chks), // empty
0, // chunks filtered
)
})
@@ -263,9 +292,10 @@ func (fq *FusedQuerier) runSeries(_ Schema, series *SeriesWithMeta, reqs []Reque
Missing ChunkRefs // chunks that do not exist in the blooms and cannot be queried
InBlooms ChunkRefs // chunks which do exist in the blooms and can be queried
- found map[int]bool // map of the index in `InBlooms` to whether the chunk
- // was found in _any_ of the blooms for the series. In order to
- // be eligible for removal, a chunk must be found in _no_ blooms.
+ // Map of the index in `InBlooms` to whether the chunk was found in _any_
+ // of the blooms for the series. In order to be eligible for removal, a
+ // chunk must be found in _no_ blooms.
+ found map[int]bool
}
inputs := make([]inputChunks, 0, len(reqs))
@@ -279,6 +309,19 @@ func (fq *FusedQuerier) runSeries(_ Schema, series *SeriesWithMeta, reqs []Reque
})
}
+ if len(series.Offsets) == 0 {
+ // We end up here for series with no structured metadata fields.
+ // While building blooms, these series would yield empty blooms.
+ // We add these series to the index of the block so we don't report them as missing,
+ // but we don't filter any chunks for them.
+ level.Debug(fq.logger).Log(
+ "msg", "series with empty offsets",
+ "fp", series.Fingerprint,
+ )
+ fq.recordEmptyFp(reqs, series.Fingerprint)
+ return
+ }
+
for i, offset := range series.Offsets {
skip := fq.bq.blooms.LoadOffset(offset)
if skip {
@@ -354,13 +397,13 @@ func (fq *FusedQuerier) runSeries(_ Schema, series *SeriesWithMeta, reqs []Reque
}
for i, req := range reqs {
-
removals := removalsFor(inputs[i].InBlooms, inputs[i].found)
req.Recorder.record(
1, len(inputs[i].InBlooms), // found
0, 0, // skipped
0, len(inputs[i].Missing), // missed
+ 0, 0, // empty
len(removals), // filtered
)
req.Response <- Output{
diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go
index 4a22b91e70099..3283dc6ccdb59 100644
--- a/pkg/storage/bloom/v1/fuse_test.go
+++ b/pkg/storage/bloom/v1/fuse_test.go
@@ -58,17 +58,14 @@ func TestFusedQuerier(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
writer,
)
require.Nil(t, err)
- itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ itr := v2.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.NoError(t, err)
require.False(t, itr.Next())
@@ -99,7 +96,7 @@ func TestFusedQuerier(t *testing.T) {
var itrs []v2.PeekIterator[Request]
for _, reqs := range inputs {
- itrs = append(itrs, v2.NewPeekIter[Request](v2.NewSliceIter[Request](reqs)))
+ itrs = append(itrs, v2.NewPeekIter(v2.NewSliceIter(reqs)))
}
resps := make([][]Output, nReqs)
@@ -145,10 +142,7 @@ func TestFusedQuerier_MultiPage(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10, // So we force one bloom per page
},
@@ -294,17 +288,14 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10, // So we force one series per page
},
writer,
)
require.Nil(t, err)
- itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ itr := v2.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.NoError(t, err)
require.False(t, itr.Next())
@@ -352,10 +343,7 @@ func TestFusedQuerier_SkipsEmptyBlooms(t *testing.T) {
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.None,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
@@ -379,7 +367,7 @@ func TestFusedQuerier_SkipsEmptyBlooms(t *testing.T) {
Blooms: v2.NewSliceIter([]*Bloom{NewBloom()}),
}
- itr := v2.NewSliceIter[SeriesWithBlooms]([]SeriesWithBlooms{data})
+ itr := v2.NewSliceIter([]SeriesWithBlooms{data})
_, err = builder.BuildFrom(itr)
require.NoError(t, err)
require.False(t, itr.Next())
@@ -394,7 +382,7 @@ func TestFusedQuerier_SkipsEmptyBlooms(t *testing.T) {
}
err = NewBlockQuerier(block, BloomPagePool, DefaultMaxPageSize).Fuse(
[]v2.PeekIterator[Request]{
- v2.NewPeekIter[Request](v2.NewSliceIter[Request]([]Request{req})),
+ v2.NewPeekIter(v2.NewSliceIter([]Request{req})),
},
log.NewNopLogger(),
).Run()
@@ -413,17 +401,14 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 256 << 10, // 256k
BloomPageSize: 1 << 20, // 1MB
},
writer,
)
require.Nil(b, err)
- itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ itr := v2.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.Nil(b, err)
block := NewBlock(reader, NewMetrics(nil))
@@ -485,7 +470,7 @@ func BenchmarkBlockQuerying(b *testing.B) {
for i := 0; i < b.N; i++ {
itrs = itrs[:0]
for _, reqs := range requestChains {
- itrs = append(itrs, v2.NewPeekIter[Request](v2.NewSliceIter[Request](reqs)))
+ itrs = append(itrs, v2.NewPeekIter(v2.NewSliceIter(reqs)))
}
fused := querier.Fuse(itrs, log.NewNopLogger())
_ = fused.Run()
diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
index a9e03efc41af9..8be1a45d35c21 100644
--- a/pkg/storage/bloom/v1/index.go
+++ b/pkg/storage/bloom/v1/index.go
@@ -153,7 +153,8 @@ func aggregateHeaders(xs []SeriesHeader) SeriesHeader {
fromFp, _ := xs[0].Bounds.Bounds()
_, throughFP := xs[len(xs)-1].Bounds.Bounds()
res := SeriesHeader{
- Bounds: NewBounds(fromFp, throughFP),
+ NumSeries: len(xs),
+ Bounds: NewBounds(fromFp, throughFP),
}
for i, x := range xs {
diff --git a/pkg/storage/bloom/v1/index_builder.go b/pkg/storage/bloom/v1/index_builder.go
index 067a79ad03f4e..9703177f1200b 100644
--- a/pkg/storage/bloom/v1/index_builder.go
+++ b/pkg/storage/bloom/v1/index_builder.go
@@ -35,6 +35,10 @@ func NewIndexBuilder(opts BlockOptions, writer io.WriteCloser) *IndexBuilder {
}
}
+func (b *IndexBuilder) UnflushedSize() int {
+ return b.scratch.Len() + b.page.UnflushedSize()
+}
+
func (b *IndexBuilder) WriteOpts() error {
b.scratch.Reset()
b.opts.Encode(b.scratch)
diff --git a/pkg/storage/bloom/v1/index_querier.go b/pkg/storage/bloom/v1/index_querier.go
index fe05f7bcddfda..cd1f548978169 100644
--- a/pkg/storage/bloom/v1/index_querier.go
+++ b/pkg/storage/bloom/v1/index_querier.go
@@ -5,15 +5,8 @@ import (
"github.com/efficientgo/core/errors"
"github.com/prometheus/common/model"
-
- iter "github.com/grafana/loki/v3/pkg/iter/v2"
)
-type SeriesIterator interface {
- iter.Iterator[*SeriesWithMeta]
- Reset()
-}
-
type LazySeriesIter struct {
b *Block
diff --git a/pkg/storage/bloom/v1/merge.go b/pkg/storage/bloom/v1/merge.go
index 0e94d0d506408..53348dcf333cd 100644
--- a/pkg/storage/bloom/v1/merge.go
+++ b/pkg/storage/bloom/v1/merge.go
@@ -69,11 +69,6 @@ func (mbq *HeapIterator[T]) At() T {
return mbq.cache
}
-func (mbq *HeapIterator[T]) push(x iter.PeekIterator[T]) {
- mbq.itrs = append(mbq.itrs, x)
- mbq.up(mbq.Len() - 1)
-}
-
func (mbq *HeapIterator[T]) pop() (T, bool) {
for {
if mbq.Len() == 0 {
diff --git a/pkg/storage/bloom/v1/merge_test.go b/pkg/storage/bloom/v1/merge_test.go
index f57c629d75429..9256cdf9b8252 100644
--- a/pkg/storage/bloom/v1/merge_test.go
+++ b/pkg/storage/bloom/v1/merge_test.go
@@ -21,7 +21,7 @@ func TestMergeBlockQuerier_NonOverlapping(t *testing.T) {
for j := 0; j < numSeries/numQueriers; j++ {
ptrs = append(ptrs, &data[i*numSeries/numQueriers+j])
}
- queriers = append(queriers, v2.NewPeekIter[*SeriesWithBlooms](v2.NewSliceIter[*SeriesWithBlooms](ptrs)))
+ queriers = append(queriers, v2.NewPeekIter(v2.NewSliceIter(ptrs)))
}
mbq := NewHeapIterForSeriesWithBloom(queriers...)
@@ -46,11 +46,7 @@ func TestMergeBlockQuerier_Duplicate(t *testing.T) {
for i := 0; i < numQueriers; i++ {
queriers = append(
queriers,
- v2.NewPeekIter[*SeriesWithBlooms](
- v2.NewSliceIter[*SeriesWithBlooms](
- PointerSlice[SeriesWithBlooms](data),
- ),
- ),
+ v2.NewPeekIter(v2.NewSliceIter(PointerSlice(data))),
)
}
@@ -79,7 +75,7 @@ func TestMergeBlockQuerier_Overlapping(t *testing.T) {
slices[i%numQueriers] = append(slices[i%numQueriers], &data[i])
}
for i := 0; i < numQueriers; i++ {
- queriers = append(queriers, v2.NewPeekIter[*SeriesWithBlooms](v2.NewSliceIter[*SeriesWithBlooms](slices[i])))
+ queriers = append(queriers, v2.NewPeekIter(v2.NewSliceIter(slices[i])))
}
mbq := NewHeapIterForSeriesWithBloom(queriers...)
diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go
index e2ce99a4702d1..0ad86848df1b1 100644
--- a/pkg/storage/bloom/v1/metrics.go
+++ b/pkg/storage/bloom/v1/metrics.go
@@ -56,6 +56,7 @@ const (
recorderFound = "found"
recorderSkipped = "skipped"
recorderMissed = "missed"
+ recorderEmpty = "empty"
recorderFiltered = "filtered"
)
diff --git a/pkg/storage/bloom/v1/reader.go b/pkg/storage/bloom/v1/reader.go
index d589aa19c4927..245ead867ff1d 100644
--- a/pkg/storage/bloom/v1/reader.go
+++ b/pkg/storage/bloom/v1/reader.go
@@ -61,7 +61,7 @@ func (r *ByteReader) TarEntries() (iter.Iterator[TarEntry], error) {
},
}
- return iter.NewSliceIter[TarEntry](entries), err
+ return iter.NewSliceIter(entries), err
}
func (r *ByteReader) Cleanup() error {
@@ -162,7 +162,7 @@ func (r *DirectoryBlockReader) TarEntries() (iter.Iterator[TarEntry], error) {
},
}
- return iter.NewSliceIter[TarEntry](entries), nil
+ return iter.NewSliceIter(entries), nil
}
func (r *DirectoryBlockReader) Cleanup() error {
diff --git a/pkg/storage/bloom/v1/schema.go b/pkg/storage/bloom/v1/schema.go
index 7c0271434b2b4..0a7e2881a3a3f 100644
--- a/pkg/storage/bloom/v1/schema.go
+++ b/pkg/storage/bloom/v1/schema.go
@@ -33,7 +33,6 @@ const (
var (
SupportedVersions = []Version{V3}
- ErrInvalidSchemaVersion = errors.New("invalid schema version")
ErrUnsupportedSchemaVersion = errors.New("unsupported schema version")
)
@@ -42,10 +41,10 @@ type Schema struct {
encoding compression.Codec
}
-func NewSchema() Schema {
+func NewSchema(version Version, encoding compression.Codec) Schema {
return Schema{
- version: CurrentSchemaVersion,
- encoding: compression.None,
+ version: version,
+ encoding: encoding,
}
}
@@ -80,7 +79,6 @@ func (s *Schema) Encode(enc *encoding.Encbuf) {
enc.PutBE32(magicNumber)
enc.PutByte(byte(s.version))
enc.PutByte(byte(s.encoding))
-
}
func (s *Schema) DecodeFrom(r io.ReadSeeker) error {
diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go
index 4d036ba4809df..f040ab4297282 100644
--- a/pkg/storage/bloom/v1/test_util.go
+++ b/pkg/storage/bloom/v1/test_util.go
@@ -28,17 +28,14 @@ func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromT
builder, err := NewBlockBuilder(
BlockOptions{
- Schema: Schema{
- version: CurrentSchemaVersion,
- encoding: compression.Snappy,
- },
+ Schema: NewSchema(CurrentSchemaVersion, compression.Snappy),
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
},
writer,
)
require.Nil(t, err)
- itr := iter.NewSliceIter[SeriesWithBlooms](data)
+ itr := iter.NewSliceIter(data)
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
block := NewBlock(reader, NewMetrics(nil))
@@ -135,9 +132,11 @@ func CompareIterators[A, B any](
a iter.Iterator[A],
b iter.Iterator[B],
) {
+ var i int
for a.Next() {
- require.True(t, b.Next())
+ require.Truef(t, b.Next(), "'a' has %dth element but 'b' does not'", i)
f(t, a.At(), b.At())
+ i++
}
require.False(t, b.Next())
require.NoError(t, a.Err())
diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go
index 6745ccaec7c61..31a51d8d2e862 100644
--- a/pkg/storage/bloom/v1/util.go
+++ b/pkg/storage/bloom/v1/util.go
@@ -26,10 +26,6 @@ var (
SeriesPagePool = mempool.NewBytePoolAllocator(1<<10, 128<<10, 2)
)
-func newCRC32() hash.Hash32 {
- return crc32.New(castagnoliTable)
-}
-
type ChecksumPool struct {
sync.Pool
}
diff --git a/pkg/storage/bloom/v1/versioned_builder.go b/pkg/storage/bloom/v1/versioned_builder.go
index 4f1881c441e70..960c6bcdde928 100644
--- a/pkg/storage/bloom/v1/versioned_builder.go
+++ b/pkg/storage/bloom/v1/versioned_builder.go
@@ -81,7 +81,7 @@ func (b *V3Builder) BuildFrom(itr iter.Iterator[SeriesWithBlooms]) (uint32, erro
return 0, errors.Wrap(err, "iterating blooms")
}
- blockFull, err := b.AddSeries(at.Series.Series, offsets, at.Series.Meta.Fields)
+ blockFull, err := b.AddSeries(at.Series.Series, offsets, at.Series.Fields)
if err != nil {
return 0, errors.Wrapf(err, "writing series")
}
@@ -125,10 +125,35 @@ func (b *V3Builder) AddSeries(series Series, offsets []BloomOffset, fields Set[F
return false, errors.Wrapf(err, "writing index for series %v", series.Fingerprint)
}
- full, _, err := b.writer.Full(b.opts.BlockSize)
+ full, err := b.full()
if err != nil {
return false, errors.Wrap(err, "checking if block is full")
}
return full, nil
}
+
+func (b *V3Builder) full() (bool, error) {
+ if b.opts.BlockSize == 0 {
+ // Unlimited block size
+ return false, nil
+ }
+
+ full, writtenSize, err := b.writer.Full(b.opts.BlockSize)
+ if err != nil {
+ return false, errors.Wrap(err, "checking if block writer is full")
+ }
+ if full {
+ return true, nil
+ }
+
+ // Even if the block writer is not full, we may have unflushed data in the bloom builders.
+ // Check if by flushing these, we would exceed the block size.
+ unflushedIndexSize := b.index.UnflushedSize()
+ unflushedBloomSize := b.blooms.UnflushedSize()
+ if uint64(writtenSize+unflushedIndexSize+unflushedBloomSize) > b.opts.BlockSize {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/pkg/storage/bloom/v1/versioned_builder_test.go b/pkg/storage/bloom/v1/versioned_builder_test.go
index 9154daf77fc77..6d2cc621be459 100644
--- a/pkg/storage/bloom/v1/versioned_builder_test.go
+++ b/pkg/storage/bloom/v1/versioned_builder_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/compression"
@@ -16,11 +17,8 @@ import (
// characterized by small page sizes
func smallBlockOpts(v Version, enc compression.Codec) BlockOptions {
return BlockOptions{
- Schema: Schema{
- version: v,
- encoding: enc,
- },
- SeriesPageSize: 100,
+ Schema: NewSchema(v, enc),
+ SeriesPageSize: 4 << 10,
BloomPageSize: 2 << 10,
BlockSize: 0, // unlimited
}
@@ -55,11 +53,11 @@ func TestV3Roundtrip(t *testing.T) {
block := NewBlock(reader, NewMetrics(nil))
querier := NewBlockQuerier(block, &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter()
- CompareIterators[SeriesWithBlooms, *SeriesWithBlooms](
+ CompareIterators(
t,
func(t *testing.T, a SeriesWithBlooms, b *SeriesWithBlooms) {
- require.Equal(t, a.Series.Series.Fingerprint, b.Series.Series.Fingerprint)
- require.ElementsMatch(t, a.Series.Series.Chunks, b.Series.Series.Chunks)
+ require.Equal(t, a.Series.Fingerprint, b.Series.Fingerprint)
+ require.ElementsMatch(t, a.Series.Chunks, b.Series.Chunks)
bloomsA, err := v2.Collect(a.Blooms)
require.NoError(t, err)
bloomsB, err := v2.Collect(b.Blooms)
@@ -81,3 +79,103 @@ func TestV3Roundtrip(t *testing.T) {
querier,
)
}
+
+func seriesWithBlooms(nSeries int, fromFp, throughFp model.Fingerprint) []SeriesWithBlooms {
+ series, _ := MkBasicSeriesWithBlooms(nSeries, fromFp, throughFp, 0, 10000)
+ return series
+}
+
+func seriesWithoutBlooms(nSeries int, fromFp, throughFp model.Fingerprint) []SeriesWithBlooms {
+ series := seriesWithBlooms(nSeries, fromFp, throughFp)
+
+ // remove blooms from series
+ for i := range series {
+ series[i].Blooms = v2.NewEmptyIter[*Bloom]()
+ }
+
+ return series
+}
+func TestFullBlock(t *testing.T) {
+ opts := smallBlockOpts(V3, compression.None)
+ minBlockSize := opts.SeriesPageSize // 1 index page, 4KB
+ const maxEmptySeriesPerBlock = 47
+ for _, tc := range []struct {
+ name string
+ maxBlockSize uint64
+ series []SeriesWithBlooms
+ expected []SeriesWithBlooms
+ }{
+ {
+ name: "only series without blooms",
+ maxBlockSize: minBlockSize,
+ // +1 so we test adding the last series that fills the block
+ series: seriesWithoutBlooms(maxEmptySeriesPerBlock+1, 0, 0xffff),
+ expected: seriesWithoutBlooms(maxEmptySeriesPerBlock+1, 0, 0xffff),
+ },
+ {
+ name: "series without blooms and one with blooms",
+ maxBlockSize: minBlockSize,
+ series: append(
+ seriesWithoutBlooms(maxEmptySeriesPerBlock, 0, 0x7fff),
+ seriesWithBlooms(50, 0x8000, 0xffff)...,
+ ),
+ expected: append(
+ seriesWithoutBlooms(maxEmptySeriesPerBlock, 0, 0x7fff),
+ seriesWithBlooms(1, 0x8000, 0x8001)...,
+ ),
+ },
+ {
+ name: "only one series with bloom",
+ maxBlockSize: minBlockSize,
+ series: seriesWithBlooms(10, 0, 0xffff),
+ expected: seriesWithBlooms(1, 0, 1),
+ },
+ {
+ name: "one huge series with bloom and then series without",
+ maxBlockSize: minBlockSize,
+ series: append(
+ seriesWithBlooms(1, 0, 1),
+ seriesWithoutBlooms(100, 1, 0xffff)...,
+ ),
+ expected: seriesWithBlooms(1, 0, 1),
+ },
+ {
+ name: "big block",
+ maxBlockSize: 1 << 20, // 1MB
+ series: seriesWithBlooms(100, 0, 0xffff),
+ expected: seriesWithBlooms(100, 0, 0xffff),
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ indexBuf := bytes.NewBuffer(nil)
+ bloomsBuf := bytes.NewBuffer(nil)
+ writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
+ reader := NewByteReader(indexBuf, bloomsBuf)
+ opts.BlockSize = tc.maxBlockSize
+
+ b, err := NewBlockBuilderV3(opts, writer)
+ require.NoError(t, err)
+
+ _, err = b.BuildFrom(v2.NewSliceIter(tc.series))
+ require.NoError(t, err)
+
+ block := NewBlock(reader, NewMetrics(nil))
+ querier := NewBlockQuerier(block, &mempool.SimpleHeapAllocator{}, DefaultMaxPageSize).Iter()
+
+ CompareIterators(
+ t,
+ func(t *testing.T, a SeriesWithBlooms, b *SeriesWithBlooms) {
+ require.Equal(t, a.Series.Fingerprint, b.Series.Fingerprint)
+ require.ElementsMatch(t, a.Series.Chunks, b.Series.Chunks)
+ bloomsA, err := v2.Collect(a.Blooms)
+ require.NoError(t, err)
+ bloomsB, err := v2.Collect(b.Blooms)
+ require.NoError(t, err)
+ require.Equal(t, len(bloomsB), len(bloomsA))
+ },
+ v2.NewSliceIter(tc.expected),
+ querier,
+ )
+ })
+ }
+}
diff --git a/pkg/storage/bucket/azure/bucket_client.go b/pkg/storage/bucket/azure/bucket_client.go
index 7cc9a91767402..0cd5e6b3bacff 100644
--- a/pkg/storage/bucket/azure/bucket_client.go
+++ b/pkg/storage/bucket/azure/bucket_client.go
@@ -1,39 +1,37 @@
package azure
import (
+ "net/http"
+
"github.com/go-kit/log"
- "github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/azure"
- yaml "gopkg.in/yaml.v2"
)
func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
- bucketConfig := azure.Config{
- StorageAccountName: cfg.StorageAccountName,
- StorageAccountKey: cfg.StorageAccountKey.String(),
- StorageConnectionString: cfg.ConnectionString.String(),
- ContainerName: cfg.ContainerName,
- Endpoint: cfg.EndpointSuffix,
- MaxRetries: cfg.MaxRetries,
- HTTPConfig: azure.HTTPConfig{
- IdleConnTimeout: model.Duration(cfg.IdleConnTimeout),
- ResponseHeaderTimeout: model.Duration(cfg.ResponseHeaderTimeout),
- InsecureSkipVerify: cfg.InsecureSkipVerify,
- TLSHandshakeTimeout: model.Duration(cfg.TLSHandshakeTimeout),
- ExpectContinueTimeout: model.Duration(cfg.ExpectContinueTimeout),
- MaxIdleConns: cfg.MaxIdleConns,
- MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost,
- MaxConnsPerHost: cfg.MaxConnsPerHost,
- },
+ return newBucketClient(cfg, name, logger, azure.NewBucketWithConfig)
+}
+
+func newBucketClient(cfg Config, name string, logger log.Logger, factory func(log.Logger, azure.Config, string, http.RoundTripper) (*azure.Bucket, error)) (objstore.Bucket, error) {
+ // Start with default config to make sure that all parameters are set to sensible values, especially
+ // HTTP Config field.
+ bucketConfig := azure.DefaultConfig
+ bucketConfig.StorageAccountName = cfg.StorageAccountName
+ bucketConfig.StorageAccountKey = cfg.StorageAccountKey.String()
+ bucketConfig.StorageConnectionString = cfg.StorageConnectionString.String()
+ bucketConfig.ContainerName = cfg.ContainerName
+ bucketConfig.MaxRetries = cfg.MaxRetries
+ bucketConfig.UserAssignedID = cfg.UserAssignedID
+
+ if cfg.Endpoint != "" {
+ // azure.DefaultConfig has the default Endpoint, overwrite it only if a different one was explicitly provided.
+ bucketConfig.Endpoint = cfg.Endpoint
}
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
+ var rt http.RoundTripper
+ if cfg.Transport != nil {
+ rt = cfg.Transport
}
- return azure.NewBucket(logger, serialized, name)
+ return factory(logger, bucketConfig, name, rt)
}
diff --git a/pkg/storage/bucket/azure/config.go b/pkg/storage/bucket/azure/config.go
index 928503190d931..ac8037b6b7819 100644
--- a/pkg/storage/bucket/azure/config.go
+++ b/pkg/storage/bucket/azure/config.go
@@ -2,22 +2,23 @@ package azure
import (
"flag"
+ "net/http"
"github.com/grafana/dskit/flagext"
-
- "github.com/grafana/loki/v3/pkg/storage/bucket/http"
)
// Config holds the config options for an Azure backend
type Config struct {
- StorageAccountName string `yaml:"account_name"`
- StorageAccountKey flagext.Secret `yaml:"account_key"`
- ConnectionString flagext.Secret `yaml:"connection_string"`
- ContainerName string `yaml:"container_name"`
- EndpointSuffix string `yaml:"endpoint_suffix"`
- MaxRetries int `yaml:"max_retries"`
+ StorageAccountName string `yaml:"account_name"`
+ StorageAccountKey flagext.Secret `yaml:"account_key"`
+ StorageConnectionString flagext.Secret `yaml:"connection_string"`
+ ContainerName string `yaml:"container_name"`
+ Endpoint string `yaml:"endpoint_suffix"`
+ MaxRetries int `yaml:"max_retries"`
+ UserAssignedID string `yaml:"user_assigned_id"`
- http.Config `yaml:"http"`
+ // Allow upstream callers to inject a round tripper
+ Transport http.RoundTripper `yaml:"-"`
}
// RegisterFlags registers the flags for Azure storage
@@ -28,10 +29,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers the flags for Azure storage
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.StorageAccountName, prefix+"azure.account-name", "", "Azure storage account name")
- f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key")
- f.Var(&cfg.ConnectionString, prefix+"azure.connection-string", "If `connection-string` is set, the values of `account-name` and `endpoint-suffix` values will not be used. Use this method over `account-key` if you need to authenticate via a SAS token. Or if you use the Azurite emulator.")
- f.StringVar(&cfg.ContainerName, prefix+"azure.container-name", "loki", "Azure storage container name")
- f.StringVar(&cfg.EndpointSuffix, prefix+"azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN")
+ f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key. If unset, Azure managed identities will be used for authentication instead.")
+ f.Var(&cfg.StorageConnectionString, prefix+"azure.connection-string", "If `connection-string` is set, the value of `endpoint-suffix` will not be used. Use this method over `account-key` if you need to authenticate via a SAS token. Or if you use the Azurite emulator.")
+ f.StringVar(&cfg.ContainerName, prefix+"azure.container-name", "", "Azure storage container name")
+ f.StringVar(&cfg.Endpoint, prefix+"azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN. If set to empty string, default endpoint suffix is used.")
f.IntVar(&cfg.MaxRetries, prefix+"azure.max-retries", 20, "Number of retries for recoverable errors")
- cfg.Config.RegisterFlagsWithPrefix(prefix+"azure.", f)
+ f.StringVar(&cfg.UserAssignedID, prefix+"azure.user-assigned-id", "", "User assigned managed identity. If empty, then System assigned identity is used.")
}
diff --git a/pkg/storage/bucket/azure/config_test.go b/pkg/storage/bucket/azure/config_test.go
deleted file mode 100644
index 82357faa147e4..0000000000000
--- a/pkg/storage/bucket/azure/config_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package azure
-
-import (
- "testing"
- "time"
-
- "github.com/grafana/dskit/flagext"
- "github.com/stretchr/testify/require"
- yaml "gopkg.in/yaml.v2"
-
- "github.com/grafana/loki/v3/pkg/storage/bucket/http"
-)
-
-// defaultConfig should match the default flag values defined in RegisterFlagsWithPrefix.
-var defaultConfig = Config{
- ContainerName: "loki",
- MaxRetries: 20,
- Config: http.Config{
- IdleConnTimeout: 90 * time.Second,
- ResponseHeaderTimeout: 2 * time.Minute,
- InsecureSkipVerify: false,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 100,
- MaxConnsPerHost: 0,
- },
-}
-
-func TestConfig(t *testing.T) {
- t.Parallel()
-
- tests := map[string]struct {
- config string
- expectedConfig Config
- expectedErr error
- }{
- "default config": {
- config: "",
- expectedConfig: defaultConfig,
- expectedErr: nil,
- },
- "custom config": {
- config: `
-account_name: test-account-name
-account_key: test-account-key
-connection_string: test-connection-string
-container_name: test-container-name
-endpoint_suffix: test-endpoint-suffix
-max_retries: 1
-http:
- idle_conn_timeout: 2s
- response_header_timeout: 3s
- insecure_skip_verify: true
- tls_handshake_timeout: 4s
- expect_continue_timeout: 5s
- max_idle_connections: 6
- max_idle_connections_per_host: 7
- max_connections_per_host: 8
-`,
- expectedConfig: Config{
- StorageAccountName: "test-account-name",
- StorageAccountKey: flagext.SecretWithValue("test-account-key"),
- ConnectionString: flagext.SecretWithValue("test-connection-string"),
- ContainerName: "test-container-name",
- EndpointSuffix: "test-endpoint-suffix",
- MaxRetries: 1,
- Config: http.Config{
- IdleConnTimeout: 2 * time.Second,
- ResponseHeaderTimeout: 3 * time.Second,
- InsecureSkipVerify: true,
- TLSHandshakeTimeout: 4 * time.Second,
- ExpectContinueTimeout: 5 * time.Second,
- MaxIdleConns: 6,
- MaxIdleConnsPerHost: 7,
- MaxConnsPerHost: 8,
- },
- },
- expectedErr: nil,
- },
- "invalid type": {
- config: `max_retries: foo`,
- expectedConfig: defaultConfig,
- expectedErr: &yaml.TypeError{Errors: []string{"line 1: cannot unmarshal !!str `foo` into int"}},
- },
- }
-
- for testName, testData := range tests {
- t.Run(testName, func(t *testing.T) {
- cfg := Config{}
- flagext.DefaultValues(&cfg)
-
- err := yaml.Unmarshal([]byte(testData.config), &cfg)
- require.Equal(t, testData.expectedErr, err)
- require.Equal(t, testData.expectedConfig, cfg)
- })
- }
-}
diff --git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go
index 4f81ce7b2934e..06f8d128f850d 100644
--- a/pkg/storage/bucket/client.go
+++ b/pkg/storage/bucket/client.go
@@ -4,20 +4,18 @@ import (
"context"
"errors"
"flag"
- "fmt"
- "strings"
+ "regexp"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/thanos-io/objstore"
- opentracing "github.com/thanos-io/objstore/tracing/opentracing"
+ objstoretracing "github.com/thanos-io/objstore/tracing/opentracing"
"github.com/grafana/loki/v3/pkg/storage/bucket/azure"
"github.com/grafana/loki/v3/pkg/storage/bucket/filesystem"
"github.com/grafana/loki/v3/pkg/storage/bucket/gcs"
"github.com/grafana/loki/v3/pkg/storage/bucket/s3"
"github.com/grafana/loki/v3/pkg/storage/bucket/swift"
- "github.com/grafana/loki/v3/pkg/util"
)
const (
@@ -35,17 +33,22 @@ const (
// Filesystem is the value for the filesystem storage backend.
Filesystem = "filesystem"
+
+ // validPrefixCharactersRegex allows only alphanumeric characters to prevent subtle bugs and simplify validation
+ validPrefixCharactersRegex = `^[\da-zA-Z]+$`
)
var (
SupportedBackends = []string{S3, GCS, Azure, Swift, Filesystem}
- ErrUnsupportedStorageBackend = errors.New("unsupported storage backend")
+ ErrUnsupportedStorageBackend = errors.New("unsupported storage backend")
+ ErrInvalidCharactersInStoragePrefix = errors.New("storage prefix contains invalid characters, it may only contain digits and English alphabet letters")
+
+ metrics = objstore.BucketMetrics(prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer), "")
)
-// Config holds configuration for accessing long-term storage.
-type Config struct {
- Backend string `yaml:"backend"`
+// StorageBackendConfig holds configuration for accessing long-term storage.
+type StorageBackendConfig struct {
// Backends
S3 s3.Config `yaml:"s3"`
GCS gcs.Config `yaml:"gcs"`
@@ -53,52 +56,85 @@ type Config struct {
Swift swift.Config `yaml:"swift"`
Filesystem filesystem.Config `yaml:"filesystem"`
- // Not used internally, meant to allow callers to wrap Buckets
- // created using this config
- Middlewares []func(objstore.Bucket) (objstore.Bucket, error) `yaml:"-"`
-
// Used to inject additional backends into the config. Allows for this config to
// be embedded in multiple contexts and support non-object storage based backends.
ExtraBackends []string `yaml:"-"`
}
-// Returns the supportedBackends for the package and any custom backends injected into the config.
-func (cfg *Config) supportedBackends() []string {
+// Returns the SupportedBackends for the package and any custom backends injected into the config.
+func (cfg *StorageBackendConfig) SupportedBackends() []string {
return append(SupportedBackends, cfg.ExtraBackends...)
}
// RegisterFlags registers the backend storage config.
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+func (cfg *StorageBackendConfig) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix("", f)
}
-func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- cfg.S3.RegisterFlagsWithPrefix(prefix, f)
+func (cfg *StorageBackendConfig) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) {
cfg.GCS.RegisterFlagsWithPrefix(prefix, f)
+ cfg.S3.RegisterFlagsWithPrefix(prefix, f)
cfg.Azure.RegisterFlagsWithPrefix(prefix, f)
cfg.Swift.RegisterFlagsWithPrefix(prefix, f)
- cfg.Filesystem.RegisterFlagsWithPrefix(prefix, f)
+ cfg.Filesystem.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir, f)
+}
- f.StringVar(&cfg.Backend, prefix+"backend", S3, fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(cfg.supportedBackends(), ", ")))
+func (cfg *StorageBackendConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f)
}
-func (cfg *Config) Validate() error {
- if !util.StringsContain(cfg.supportedBackends(), cfg.Backend) {
- return ErrUnsupportedStorageBackend
+func (cfg *StorageBackendConfig) Validate() error {
+ if err := cfg.S3.Validate(); err != nil {
+ return err
}
- if cfg.Backend == S3 {
- if err := cfg.S3.Validate(); err != nil {
- return err
+ return nil
+}
+
+// Config holds configuration for accessing long-term storage.
+type Config struct {
+ StorageBackendConfig `yaml:",inline"`
+ StoragePrefix string `yaml:"storage_prefix"`
+
+ // Not used internally, meant to allow callers to wrap Buckets
+ // created using this config
+ Middlewares []func(objstore.InstrumentedBucket) (objstore.InstrumentedBucket, error) `yaml:"-"`
+}
+
+// RegisterFlags registers the backend storage config.
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefix("", f)
+}
+
+func (cfg *Config) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) {
+ cfg.StorageBackendConfig.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir, f)
+ f.StringVar(&cfg.StoragePrefix, prefix+"storage-prefix", "", "Prefix for all objects stored in the backend storage. For simplicity, it may only contain digits and English alphabet letters.")
+}
+
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f)
+}
+
+func (cfg *Config) Validate() error {
+ if cfg.StoragePrefix != "" {
+ acceptablePrefixCharacters := regexp.MustCompile(validPrefixCharactersRegex)
+ if !acceptablePrefixCharacters.MatchString(cfg.StoragePrefix) {
+ return ErrInvalidCharactersInStoragePrefix
}
}
- return nil
+ return cfg.StorageBackendConfig.Validate()
}
// NewClient creates a new bucket client based on the configured backend
-func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) {
- switch cfg.Backend {
+func NewClient(ctx context.Context, backend string, cfg Config, name string, logger log.Logger) (objstore.InstrumentedBucket, error) {
+ var (
+ client objstore.Bucket
+ err error
+ )
+
+ // TODO: add support for other backends that loki already supports
+ switch backend {
case S3:
client, err = s3.NewBucketClient(cfg.S3, name, logger)
case GCS:
@@ -117,26 +153,19 @@ func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger,
return nil, err
}
- client = opentracing.WrapWithTraces(bucketWithMetrics(client, name, reg))
+ if cfg.StoragePrefix != "" {
+ client = NewPrefixedBucketClient(client, cfg.StoragePrefix)
+ }
+
+ instrumentedClient := objstoretracing.WrapWithTraces(objstore.WrapWith(client, metrics))
// Wrap the client with any provided middleware
for _, wrap := range cfg.Middlewares {
- client, err = wrap(client)
+ instrumentedClient, err = wrap(instrumentedClient)
if err != nil {
return nil, err
}
}
- return client, nil
-}
-
-func bucketWithMetrics(bucketClient objstore.Bucket, name string, reg prometheus.Registerer) objstore.Bucket {
- if reg == nil {
- return bucketClient
- }
-
- return objstore.WrapWithMetrics(
- bucketClient,
- prometheus.WrapRegistererWith(prometheus.Labels{"component": name}, reg),
- "")
+ return instrumentedClient, nil
}
diff --git a/pkg/storage/bucket/client_test.go b/pkg/storage/bucket/client_test.go
index fb7acec91089a..a4bdb8f6e251c 100644
--- a/pkg/storage/bucket/client_test.go
+++ b/pkg/storage/bucket/client_test.go
@@ -14,7 +14,6 @@ import (
const (
configWithS3Backend = `
-backend: s3
s3:
endpoint: localhost
bucket_name: test
@@ -24,7 +23,6 @@ s3:
`
configWithGCSBackend = `
-backend: gcs
gcs:
bucket_name: test
service_account: |-
@@ -40,10 +38,6 @@ gcs:
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40test.com"
}
-`
-
- configWithUnknownBackend = `
-backend: unknown
`
)
@@ -51,19 +45,23 @@ func TestNewClient(t *testing.T) {
t.Parallel()
tests := map[string]struct {
+ backend string
config string
expectedErr error
}{
"should create an S3 bucket": {
+ backend: "s3",
config: configWithS3Backend,
expectedErr: nil,
},
"should create a GCS bucket": {
+ backend: "gcs",
config: configWithGCSBackend,
expectedErr: nil,
},
"should return error on unknown backend": {
- config: configWithUnknownBackend,
+ backend: "unknown",
+ config: "",
expectedErr: ErrUnsupportedStorageBackend,
},
}
@@ -78,7 +76,7 @@ func TestNewClient(t *testing.T) {
require.NoError(t, err)
// Instance a new bucket client from the config
- bucketClient, err := NewClient(context.Background(), cfg, "test", util_log.Logger, nil)
+ bucketClient, err := NewClient(context.Background(), testData.backend, cfg, "test", util_log.Logger)
require.Equal(t, testData.expectedErr, err)
if testData.expectedErr == nil {
diff --git a/pkg/storage/bucket/filesystem/config.go b/pkg/storage/bucket/filesystem/config.go
index 923923a032906..873a2eb1ba289 100644
--- a/pkg/storage/bucket/filesystem/config.go
+++ b/pkg/storage/bucket/filesystem/config.go
@@ -12,7 +12,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix("", f)
}
+// RegisterFlagsWithPrefixAndDefaultDirectory registers the flags for filesystem
+// storage with the provided prefix and sets the default directory to dir.
+func (cfg *Config) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) {
+ f.StringVar(&cfg.Directory, prefix+"filesystem.dir", dir, "Local filesystem storage directory.")
+}
+
// RegisterFlagsWithPrefix registers the flags for filesystem storage with the provided prefix
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- f.StringVar(&cfg.Directory, prefix+"filesystem.dir", "", "Local filesystem storage directory.")
+ cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f)
}
diff --git a/pkg/storage/bucket/gcs/bucket_client.go b/pkg/storage/bucket/gcs/bucket_client.go
index 39551ec49032b..b5a8ce541e1d7 100644
--- a/pkg/storage/bucket/gcs/bucket_client.go
+++ b/pkg/storage/bucket/gcs/bucket_client.go
@@ -6,22 +6,16 @@ import (
"github.com/go-kit/log"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/gcs"
- yaml "gopkg.in/yaml.v2"
)
// NewBucketClient creates a new GCS bucket client
func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
- bucketConfig := gcs.Config{
- Bucket: cfg.BucketName,
- ServiceAccount: cfg.ServiceAccount.String(),
- }
+ // start with default http configs
+ bucketConfig := gcs.DefaultConfig
+ bucketConfig.Bucket = cfg.BucketName
+ bucketConfig.ServiceAccount = cfg.ServiceAccount.String()
+ bucketConfig.ChunkSizeBytes = cfg.ChunkBufferSize
+ bucketConfig.HTTPConfig.Transport = cfg.Transport
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
- }
-
- return gcs.NewBucket(ctx, logger, serialized, name)
+ return gcs.NewBucketWithConfig(ctx, logger, bucketConfig, name, nil)
}
diff --git a/pkg/storage/bucket/gcs/config.go b/pkg/storage/bucket/gcs/config.go
index 1e212352281eb..a46c5030e4413 100644
--- a/pkg/storage/bucket/gcs/config.go
+++ b/pkg/storage/bucket/gcs/config.go
@@ -2,14 +2,19 @@ package gcs
import (
"flag"
+ "net/http"
"github.com/grafana/dskit/flagext"
)
// Config holds the config options for GCS backend
type Config struct {
- BucketName string `yaml:"bucket_name"`
- ServiceAccount flagext.Secret `yaml:"service_account"`
+ BucketName string `yaml:"bucket_name"`
+ ServiceAccount flagext.Secret `yaml:"service_account" doc:"description_method=GCSServiceAccountLongDescription"`
+ ChunkBufferSize int `yaml:"chunk_buffer_size"`
+
+ // Allow upstream callers to inject a round tripper
+ Transport http.RoundTripper `yaml:"-"`
}
// RegisterFlags registers the flags for GCS storage
@@ -20,5 +25,18 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers the flags for GCS storage with the provided prefix
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.BucketName, prefix+"gcs.bucket-name", "", "GCS bucket name")
- f.Var(&cfg.ServiceAccount, prefix+"gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.")
+ f.Var(&cfg.ServiceAccount, prefix+"gcs.service-account", cfg.GCSServiceAccountShortDescription())
+ f.IntVar(&cfg.ChunkBufferSize, prefix+"gcs.chunk-buffer-size", 0, "The maximum size of the buffer that GCS client for a single PUT request. 0 to disable buffering.")
+}
+
+func (cfg *Config) GCSServiceAccountShortDescription() string {
+ return "JSON either from a Google Developers Console client_credentials.json file, or a Google Developers service account key. Needs to be valid JSON, not a filesystem path."
+}
+
+func (cfg *Config) GCSServiceAccountLongDescription() string {
+ return cfg.GCSServiceAccountShortDescription() +
+ " If empty, fallback to Google default logic:" +
+ "\n1. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. For workload identity federation, refer to https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on how to generate the JSON configuration file for on-prem/non-Google cloud platforms." +
+ "\n2. A JSON file in a location known to the gcloud command-line tool: $HOME/.config/gcloud/application_default_credentials.json." +
+ "\n3. On Google Compute Engine it fetches credentials from the metadata server."
}
diff --git a/pkg/storage/bucket/http/config.go b/pkg/storage/bucket/http/config.go
index 1c83e1f311bbb..509de0bf301f1 100644
--- a/pkg/storage/bucket/http/config.go
+++ b/pkg/storage/bucket/http/config.go
@@ -15,6 +15,7 @@ type Config struct {
MaxIdleConns int `yaml:"max_idle_connections"`
MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host"`
MaxConnsPerHost int `yaml:"max_connections_per_host"`
+ CAFile string `yaml:"ca_file"`
}
// RegisterFlags registers the flags for the storage HTTP client.
@@ -24,12 +25,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers the flags for the storage HTTP client with the provided prefix.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- f.DurationVar(&cfg.IdleConnTimeout, prefix+"http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.")
- f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.")
- f.BoolVar(&cfg.InsecureSkipVerify, prefix+"http.insecure-skip-verify", false, "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.")
+ f.DurationVar(&cfg.IdleConnTimeout, prefix+"idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.")
+ f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a server's response headers.")
+ f.BoolVar(&cfg.InsecureSkipVerify, prefix+"insecure-skip-verify", false, "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.")
f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.")
f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.")
f.IntVar(&cfg.MaxIdleConns, prefix+"max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.")
f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.")
f.IntVar(&cfg.MaxConnsPerHost, prefix+"max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.")
+ f.StringVar(&cfg.CAFile, prefix+"ca-file", "", "Path to the trusted CA file that signed the SSL certificate of the object storage endpoint.")
}
diff --git a/pkg/storage/bucket/object_client_adapter.go b/pkg/storage/bucket/object_client_adapter.go
new file mode 100644
index 0000000000000..094f0ad2ea7ac
--- /dev/null
+++ b/pkg/storage/bucket/object_client_adapter.go
@@ -0,0 +1,150 @@
+package bucket
+
+import (
+ "context"
+ "io"
+ "strings"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+)
+
+type ObjectClientAdapter struct {
+ bucket, hedgedBucket objstore.Bucket
+ logger log.Logger
+ isRetryableErr func(err error) bool
+}
+
+func NewObjectClientAdapter(bucket, hedgedBucket objstore.Bucket, logger log.Logger, opts ...ClientOptions) *ObjectClientAdapter {
+ if hedgedBucket == nil {
+ hedgedBucket = bucket
+ }
+
+ o := &ObjectClientAdapter{
+ bucket: bucket,
+ hedgedBucket: hedgedBucket,
+ logger: log.With(logger, "component", "bucket_to_object_client_adapter"),
+ // default to no retryable errors. Override with WithRetryableErrFunc
+ isRetryableErr: func(_ error) bool {
+ return false
+ },
+ }
+
+ for _, opt := range opts {
+ opt(o)
+ }
+
+ return o
+}
+
+type ClientOptions func(*ObjectClientAdapter)
+
+func WithRetryableErrFunc(f func(err error) bool) ClientOptions {
+ return func(o *ObjectClientAdapter) {
+ o.isRetryableErr = f
+ }
+}
+
+func (o *ObjectClientAdapter) Stop() {
+}
+
+// ObjectExists checks if a given objectKey exists in the bucket
+func (o *ObjectClientAdapter) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
+ return o.bucket.Exists(ctx, objectKey)
+}
+
+// GetAttributes returns the attributes of the specified object key from the configured bucket.
+func (o *ObjectClientAdapter) GetAttributes(ctx context.Context, objectKey string) (client.ObjectAttributes, error) {
+ attr := client.ObjectAttributes{}
+ thanosAttr, err := o.hedgedBucket.Attributes(ctx, objectKey)
+ if err != nil {
+ return attr, err
+ }
+
+ attr.Size = thanosAttr.Size
+ return attr, nil
+}
+
+// PutObject puts the specified bytes into the configured bucket at the provided key
+func (o *ObjectClientAdapter) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
+ return o.bucket.Upload(ctx, objectKey, object)
+}
+
+// GetObject returns a reader and the size for the specified object key from the configured bucket.
+// size is set to -1 if it cannot be successfully determined, it is up to the caller to check this value before using it.
+func (o *ObjectClientAdapter) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
+ reader, err := o.hedgedBucket.Get(ctx, objectKey)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ size, err := objstore.TryToGetSize(reader)
+ if err != nil {
+ size = -1
+ level.Warn(o.logger).Log("msg", "failed to get size of object", "err", err)
+ }
+
+ return reader, size, err
+}
+
+func (o *ObjectClientAdapter) GetObjectRange(ctx context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
+ return o.hedgedBucket.GetRange(ctx, objectKey, offset, length)
+}
+
+// List objects with given prefix.
+func (o *ObjectClientAdapter) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+ var storageObjects []client.StorageObject
+ var commonPrefixes []client.StorageCommonPrefix
+ var iterParams []objstore.IterOption
+
+ // If delimiter is empty we want to list all files
+ if delimiter == "" {
+ iterParams = append(iterParams, objstore.WithRecursiveIter)
+ }
+
+ err := o.bucket.Iter(ctx, prefix, func(objectKey string) error {
+ // CommonPrefixes are keys that have the prefix and have the delimiter
+ // as a suffix
+ if delimiter != "" && strings.HasSuffix(objectKey, delimiter) {
+ commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(objectKey))
+ return nil
+ }
+
+ // TODO: remove this once thanos supports IterWithAttributes
+ attr, err := o.bucket.Attributes(ctx, objectKey)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get attributes for %s", objectKey)
+ }
+
+ storageObjects = append(storageObjects, client.StorageObject{
+ Key: objectKey,
+ ModifiedAt: attr.LastModified,
+ })
+
+ return nil
+ }, iterParams...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return storageObjects, commonPrefixes, nil
+}
+
+// DeleteObject deletes the specified object key from the configured bucket.
+func (o *ObjectClientAdapter) DeleteObject(ctx context.Context, objectKey string) error {
+ return o.bucket.Delete(ctx, objectKey)
+}
+
+// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.
+func (o *ObjectClientAdapter) IsObjectNotFoundErr(err error) bool {
+ return o.bucket.IsObjNotFoundErr(err)
+}
+
+// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
+func (o *ObjectClientAdapter) IsRetryableErr(err error) bool {
+ return o.isRetryableErr(err)
+}
diff --git a/pkg/storage/bucket/object_client_adapter_test.go b/pkg/storage/bucket/object_client_adapter_test.go
new file mode 100644
index 0000000000000..1ce6de26856bf
--- /dev/null
+++ b/pkg/storage/bucket/object_client_adapter_test.go
@@ -0,0 +1,123 @@
+package bucket
+
+import (
+ "bytes"
+ "context"
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket/filesystem"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+)
+
+func TestObjectClientAdapter_List(t *testing.T) {
+ tests := []struct {
+ name string
+ prefix string
+ delimiter string
+ storageObjKeys []string
+ storageCommonPref []client.StorageCommonPrefix
+ wantErr error
+ }{
+ {
+ "list_top_level_only",
+ "",
+ "/",
+ []string{"top-level-file-1", "top-level-file-2"},
+ []client.StorageCommonPrefix{"dir-1/", "dir-2/", "depply/"},
+ nil,
+ },
+ {
+ "list_all_dir_1",
+ "dir-1",
+ "",
+ []string{"dir-1/file-1", "dir-1/file-2"},
+ nil,
+ nil,
+ },
+ {
+ "list_recursive",
+ "",
+ "",
+ []string{
+ "top-level-file-1",
+ "top-level-file-2",
+ "dir-1/file-1",
+ "dir-1/file-2",
+ "dir-2/file-3",
+ "dir-2/file-4",
+ "dir-2/file-5",
+ "depply/nested/folder/a",
+ "depply/nested/folder/b",
+ "depply/nested/folder/c",
+ },
+ nil,
+ nil,
+ },
+ {
+ "unknown_prefix",
+ "test",
+ "",
+ []string{},
+ nil,
+ nil,
+ },
+ {
+ "only_storage_common_prefix",
+ "depply/",
+ "/",
+ []string{},
+ []client.StorageCommonPrefix{
+ "depply/nested/",
+ },
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ config := filesystem.Config{
+ Directory: t.TempDir(),
+ }
+ newBucket, err := filesystem.NewBucketClient(config)
+ require.NoError(t, err)
+
+ buff := bytes.NewBufferString("foo")
+ require.NoError(t, newBucket.Upload(context.Background(), "top-level-file-1", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "top-level-file-2", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "dir-1/file-1", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "dir-1/file-2", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-3", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-4", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-5", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/a", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/b", buff))
+ require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/c", buff))
+
+ client := NewObjectClientAdapter(newBucket, nil, nil)
+ client.bucket = newBucket
+
+ storageObj, storageCommonPref, err := client.List(context.Background(), tt.prefix, tt.delimiter)
+ if tt.wantErr != nil {
+ require.Equal(t, tt.wantErr.Error(), err.Error())
+ continue
+ }
+
+ keys := []string{}
+ for _, key := range storageObj {
+ keys = append(keys, key.Key)
+ }
+
+ sort.Slice(tt.storageObjKeys, func(i, j int) bool {
+ return tt.storageObjKeys[i] < tt.storageObjKeys[j]
+ })
+ sort.Slice(tt.storageCommonPref, func(i, j int) bool {
+ return tt.storageCommonPref[i] < tt.storageCommonPref[j]
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, tt.storageObjKeys, keys)
+ require.Equal(t, tt.storageCommonPref, storageCommonPref)
+ }
+}
diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go
index c66322aef3c46..5d904d8e5fe9b 100644
--- a/pkg/storage/bucket/s3/bucket_client.go
+++ b/pkg/storage/bucket/s3/bucket_client.go
@@ -4,6 +4,7 @@ import (
"github.com/go-kit/log"
"github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
+ "github.com/thanos-io/objstore/exthttp"
"github.com/thanos-io/objstore/providers/s3"
)
@@ -19,7 +20,7 @@ func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucke
return nil, err
}
- return s3.NewBucketWithConfig(logger, s3Cfg, name)
+ return s3.NewBucketWithConfig(logger, s3Cfg, name, nil)
}
// NewBucketReaderClient creates a new S3 bucket client
@@ -29,7 +30,7 @@ func NewBucketReaderClient(cfg Config, name string, logger log.Logger) (objstore
return nil, err
}
- return s3.NewBucketWithConfig(logger, s3Cfg, name)
+ return s3.NewBucketWithConfig(logger, s3Cfg, name, nil)
}
func newS3Config(cfg Config) (s3.Config, error) {
@@ -38,17 +39,28 @@ func newS3Config(cfg Config) (s3.Config, error) {
return s3.Config{}, err
}
+ putUserMetadata := map[string]string{}
+
+ if cfg.StorageClass != "" {
+ putUserMetadata[awsStorageClassHeader] = cfg.StorageClass
+ }
+
return s3.Config{
- Bucket: cfg.BucketName,
- Endpoint: cfg.Endpoint,
- Region: cfg.Region,
- AccessKey: cfg.AccessKeyID,
- SecretKey: cfg.SecretAccessKey.String(),
- SessionToken: cfg.SessionToken.String(),
- Insecure: cfg.Insecure,
- DisableDualstack: cfg.DisableDualstack,
- SSEConfig: sseCfg,
- PutUserMetadata: map[string]string{awsStorageClassHeader: cfg.StorageClass},
+ Bucket: cfg.BucketName,
+ Endpoint: cfg.Endpoint,
+ Region: cfg.Region,
+ AccessKey: cfg.AccessKeyID,
+ SecretKey: cfg.SecretAccessKey.String(),
+ SessionToken: cfg.SessionToken.String(),
+ Insecure: cfg.Insecure,
+ PutUserMetadata: putUserMetadata,
+ SendContentMd5: cfg.SendContentMd5,
+ SSEConfig: sseCfg,
+ DisableDualstack: !cfg.DualstackEnabled,
+ ListObjectsVersion: cfg.ListObjectsVersion,
+ BucketLookupType: cfg.BucketLookupType,
+ AWSSDKAuth: cfg.NativeAWSAuthEnabled,
+ PartSize: cfg.PartSize,
HTTPConfig: s3.HTTPConfig{
IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout),
ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout),
@@ -59,6 +71,16 @@ func newS3Config(cfg Config) (s3.Config, error) {
MaxIdleConnsPerHost: cfg.HTTP.MaxIdleConnsPerHost,
MaxConnsPerHost: cfg.HTTP.MaxConnsPerHost,
Transport: cfg.HTTP.Transport,
+ TLSConfig: exthttp.TLSConfig{
+ CAFile: cfg.HTTP.TLSConfig.CAPath,
+ CertFile: cfg.HTTP.TLSConfig.CertPath,
+ KeyFile: cfg.HTTP.TLSConfig.KeyPath,
+ ServerName: cfg.HTTP.TLSConfig.ServerName,
+ },
+ },
+ TraceConfig: s3.TraceConfig{
+ Enable: cfg.TraceConfig.Enabled,
},
+ STSEndpoint: cfg.STSEndpoint,
}, nil
}
diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go
index 32db169f450f6..792f93f752b32 100644
--- a/pkg/storage/bucket/s3/config.go
+++ b/pkg/storage/bucket/s3/config.go
@@ -5,23 +5,20 @@ import (
"flag"
"fmt"
"net/http"
+ "slices"
"strings"
+ "time"
+ s3_service "github.com/aws/aws-sdk-go/service/s3"
"github.com/grafana/dskit/flagext"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/pkg/errors"
"github.com/thanos-io/objstore/providers/s3"
- bucket_http "github.com/grafana/loki/v3/pkg/storage/bucket/http"
- "github.com/grafana/loki/v3/pkg/storage/common/aws"
"github.com/grafana/loki/v3/pkg/util"
)
const (
- // Signature Version 2 is being turned off (deprecated) in Amazon S3. Amazon S3 will then only accept API requests that are signed using Signature Version 4.
- // https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingAWSSDK.html#UsingAWSSDK-sig2-deprecation
- SignatureVersionV4 = "v4"
-
// SSEKMS config type constant to configure S3 server side encryption using KMS
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
SSEKMS = "SSE-KMS"
@@ -32,41 +29,99 @@ const (
)
var (
- supportedSignatureVersions = []string{SignatureVersionV4}
- supportedSSETypes = []string{SSEKMS, SSES3}
- errUnsupportedSignatureVersion = errors.New("unsupported signature version")
- errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
- errInvalidSSEContext = errors.New("invalid S3 SSE encryption context")
+ supportedSSETypes = []string{SSEKMS, SSES3}
+ supportedStorageClasses = s3_service.ObjectStorageClass_Values()
+ supportedBucketLookupTypes = thanosS3BucketLookupTypesValues()
+
+ errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
+ errUnsupportedStorageClass = fmt.Errorf("unsupported S3 storage class (supported values: %s)", strings.Join(supportedStorageClasses, ", "))
+ errInvalidSSEContext = errors.New("invalid S3 SSE encryption context")
+ errInvalidEndpointPrefix = errors.New("the endpoint must not be prefixed with the bucket name")
+ errInvalidSTSEndpoint = errors.New("sts-endpoint must be a valid url")
)
+var thanosS3BucketLookupTypes = map[string]s3.BucketLookupType{
+ s3.AutoLookup.String(): s3.AutoLookup,
+ s3.VirtualHostLookup.String(): s3.VirtualHostLookup,
+ s3.PathLookup.String(): s3.PathLookup,
+}
+
+func thanosS3BucketLookupTypesValues() (list []string) {
+ for k := range thanosS3BucketLookupTypes {
+ list = append(list, k)
+ }
+ // sort the list for consistent output in help, where it's used
+ slices.Sort(list)
+ return list
+}
+
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
type HTTPConfig struct {
- bucket_http.Config `yaml:",inline"`
+ IdleConnTimeout time.Duration `yaml:"idle_conn_timeout" category:"advanced"`
+ ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout" category:"advanced"`
+ InsecureSkipVerify bool `yaml:"insecure_skip_verify" category:"advanced"`
+ TLSHandshakeTimeout time.Duration `yaml:"tls_handshake_timeout" category:"advanced"`
+ ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout" category:"advanced"`
+ MaxIdleConns int `yaml:"max_idle_connections" category:"advanced"`
+ MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host" category:"advanced"`
+ MaxConnsPerHost int `yaml:"max_connections_per_host" category:"advanced"`
// Allow upstream callers to inject a round tripper
Transport http.RoundTripper `yaml:"-"`
+
+ TLSConfig TLSConfig `yaml:",inline"`
+}
+
+// TLSConfig configures the options for TLS connections.
+type TLSConfig struct {
+ CAPath string `yaml:"tls_ca_path" category:"advanced"`
+ CertPath string `yaml:"tls_cert_path" category:"advanced"`
+ KeyPath string `yaml:"tls_key_path" category:"advanced"`
+ ServerName string `yaml:"tls_server_name" category:"advanced"`
}
// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix
func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- cfg.Config.RegisterFlagsWithPrefix(prefix+"s3.", f)
+ f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.")
+ f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a server's response headers.")
+ f.BoolVar(&cfg.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "If the client connects to S3 via HTTPS and this option is enabled, the client will accept any certificate and hostname.")
+ f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"s3.tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.")
+ f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"s3.expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.")
+ f.IntVar(&cfg.MaxIdleConns, prefix+"s3.max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.")
+ f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"s3.max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.")
+ f.IntVar(&cfg.MaxConnsPerHost, prefix+"s3.max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.")
+ cfg.TLSConfig.RegisterFlagsWithPrefix(prefix, f)
+}
+
+// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix.
+func (cfg *TLSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.StringVar(&cfg.CAPath, prefix+"s3.http.tls-ca-path", "", "Path to the CA certificates to validate server certificate against. If not set, the host's root CA certificates are used.")
+ f.StringVar(&cfg.CertPath, prefix+"s3.http.tls-cert-path", "", "Path to the client certificate, which will be used for authenticating with the server. Also requires the key path to be configured.")
+ f.StringVar(&cfg.KeyPath, prefix+"s3.http.tls-key-path", "", "Path to the key for the client certificate. Also requires the client certificate to be configured.")
+ f.StringVar(&cfg.ServerName, prefix+"s3.http.tls-server-name", "", "Override the expected name on the server certificate.")
}
// Config holds the config options for an S3 backend
type Config struct {
- Endpoint string `yaml:"endpoint"`
- Region string `yaml:"region"`
- BucketName string `yaml:"bucket_name"`
- SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
- SessionToken flagext.Secret `yaml:"session_token"`
- AccessKeyID string `yaml:"access_key_id"`
- Insecure bool `yaml:"insecure"`
- DisableDualstack bool `yaml:"disable_dualstack"`
- SignatureVersion string `yaml:"signature_version"`
- StorageClass string `yaml:"storage_class"`
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ BucketName string `yaml:"bucket_name"`
+ SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
+ AccessKeyID string `yaml:"access_key_id"`
+ SessionToken flagext.Secret `yaml:"session_token"`
+ Insecure bool `yaml:"insecure" category:"advanced"`
+ ListObjectsVersion string `yaml:"list_objects_version" category:"advanced"`
+ BucketLookupType s3.BucketLookupType `yaml:"bucket_lookup_type" category:"advanced"`
+ DualstackEnabled bool `yaml:"dualstack_enabled" category:"experimental"`
+ StorageClass string `yaml:"storage_class" category:"experimental"`
+ NativeAWSAuthEnabled bool `yaml:"native_aws_auth_enabled" category:"experimental"`
+ PartSize uint64 `yaml:"part_size" category:"experimental"`
+ SendContentMd5 bool `yaml:"send_content_md5" category:"experimental"`
+ STSEndpoint string `yaml:"sts_endpoint"`
- SSE SSEConfig `yaml:"sse"`
- HTTP HTTPConfig `yaml:"http"`
+ SSE SSEConfig `yaml:"sse"`
+ HTTP HTTPConfig `yaml:"http"`
+ TraceConfig TraceConfig `yaml:"trace"`
}
// RegisterFlags registers the flags for s3 storage with the provided prefix
@@ -83,21 +138,32 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.Region, prefix+"s3.region", "", "S3 region. If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.")
f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.")
f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.")
- f.BoolVar(&cfg.DisableDualstack, prefix+"s3.disable-dualstack", false, "Disable forcing S3 dualstack endpoint usage.")
- f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", ")))
- f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", aws.StorageClassStandard, "The S3 storage class to use. Details can be found at https://aws.amazon.com/s3/storage-classes/.")
+ f.StringVar(&cfg.ListObjectsVersion, prefix+"s3.list-objects-version", "", "Use a specific version of the S3 list object API. Supported values are v1 or v2. Default is unset.")
+ f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", "", "The S3 storage class to use, not set by default. Details can be found at https://aws.amazon.com/s3/storage-classes/. Supported values are: "+strings.Join(supportedStorageClasses, ", "))
+ f.BoolVar(&cfg.NativeAWSAuthEnabled, prefix+"s3.native-aws-auth-enabled", false, "If enabled, it will use the default authentication methods of the AWS SDK for go based on known environment variables and known AWS config files.")
+ f.Uint64Var(&cfg.PartSize, prefix+"s3.part-size", 0, "The minimum file size in bytes used for multipart uploads. If 0, the value is optimally computed for each object.")
+ f.BoolVar(&cfg.SendContentMd5, prefix+"s3.send-content-md5", false, "If enabled, a Content-MD5 header is sent with S3 Put Object requests. Consumes more resources to compute the MD5, but may improve compatibility with object storage services that do not support checksums.")
+ f.Var(newBucketLookupTypeValue(s3.AutoLookup, &cfg.BucketLookupType), prefix+"s3.bucket-lookup-type", fmt.Sprintf("Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: %s.", strings.Join(supportedBucketLookupTypes, ", ")))
+ f.BoolVar(&cfg.DualstackEnabled, prefix+"s3.dualstack-enabled", true, "When enabled, direct all AWS S3 requests to the dual-stack IPv4/IPv6 endpoint for the configured region.")
+ f.StringVar(&cfg.STSEndpoint, prefix+"s3.sts-endpoint", "", "Accessing S3 resources using temporary, secure credentials provided by AWS Security Token Service.")
cfg.SSE.RegisterFlagsWithPrefix(prefix+"s3.sse.", f)
cfg.HTTP.RegisterFlagsWithPrefix(prefix, f)
+ cfg.TraceConfig.RegisterFlagsWithPrefix(prefix+"s3.trace.", f)
}
// Validate config and returns error on failure
func (cfg *Config) Validate() error {
- if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) {
- return errUnsupportedSignatureVersion
+ if cfg.Endpoint != "" {
+ endpoint := strings.Split(cfg.Endpoint, ".")
+ if cfg.BucketName != "" && endpoint[0] != "" && endpoint[0] == cfg.BucketName {
+ return errInvalidEndpointPrefix
+ }
}
-
- if err := aws.ValidateStorageClass(cfg.StorageClass); err != nil {
- return err
+ if cfg.STSEndpoint != "" && !util.IsValidURL(cfg.STSEndpoint) {
+ return errInvalidSTSEndpoint
+ }
+ if !slices.Contains(supportedStorageClasses, cfg.StorageClass) && cfg.StorageClass != "" {
+ return errUnsupportedStorageClass
}
return cfg.SSE.Validate()
@@ -191,3 +257,35 @@ func parseKMSEncryptionContext(data string) (map[string]string, error) {
err := errors.Wrap(json.Unmarshal([]byte(data), &decoded), "unable to parse KMS encryption context")
return decoded, err
}
+
+type TraceConfig struct {
+ Enabled bool `yaml:"enabled" category:"advanced"`
+}
+
+func (cfg *TraceConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "When enabled, low-level S3 HTTP operation information is logged at the debug level.")
+}
+
+// bucketLookupTypeValue is an adapter between s3.BucketLookupType and flag.Value.
+type bucketLookupTypeValue s3.BucketLookupType
+
+func newBucketLookupTypeValue(value s3.BucketLookupType, p *s3.BucketLookupType) *bucketLookupTypeValue {
+ *p = value
+ return (*bucketLookupTypeValue)(p)
+}
+
+func (v *bucketLookupTypeValue) String() string {
+ if v == nil {
+ return s3.AutoLookup.String()
+ }
+ return s3.BucketLookupType(*v).String()
+}
+
+func (v *bucketLookupTypeValue) Set(s string) error {
+ t, ok := thanosS3BucketLookupTypes[s]
+ if !ok {
+ return fmt.Errorf("unsupported bucket lookup type: %s", s)
+ }
+ *v = bucketLookupTypeValue(t)
+ return nil
+}
diff --git a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go
index 3f32e8f847936..078353b68bd86 100644
--- a/pkg/storage/bucket/s3/config_test.go
+++ b/pkg/storage/bucket/s3/config_test.go
@@ -1,127 +1,23 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/storage/bucket/s3/config_test.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: The Cortex Authors.
+
package s3
import (
+ "bytes"
"encoding/base64"
- "fmt"
"net/http"
- "strings"
"testing"
- "time"
+ s3_service "github.com/aws/aws-sdk-go/service/s3"
"github.com/grafana/dskit/flagext"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v2"
-
- bucket_http "github.com/grafana/loki/v3/pkg/storage/bucket/http"
- "github.com/grafana/loki/v3/pkg/storage/common/aws"
+ "gopkg.in/yaml.v3"
)
-// defaultConfig should match the default flag values defined in RegisterFlagsWithPrefix.
-var defaultConfig = Config{
- SignatureVersion: SignatureVersionV4,
- StorageClass: aws.StorageClassStandard,
- HTTP: HTTPConfig{
- Config: bucket_http.Config{
- IdleConnTimeout: 90 * time.Second,
- ResponseHeaderTimeout: 2 * time.Minute,
- InsecureSkipVerify: false,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 100,
- MaxConnsPerHost: 0,
- },
- },
-}
-
-func TestConfig(t *testing.T) {
- t.Parallel()
-
- tests := map[string]struct {
- config string
- expectedConfig Config
- expectedErr error
- }{
- "default config": {
- config: "",
- expectedConfig: defaultConfig,
- expectedErr: nil,
- },
- "custom config": {
- config: `
-endpoint: test-endpoint
-region: test-region
-bucket_name: test-bucket-name
-secret_access_key: test-secret-access-key
-access_key_id: test-access-key-id
-insecure: true
-signature_version: test-signature-version
-storage_class: test-storage-class
-disable_dualstack: true
-sse:
- type: test-type
- kms_key_id: test-kms-key-id
- kms_encryption_context: test-kms-encryption-context
-http:
- idle_conn_timeout: 2s
- response_header_timeout: 3s
- insecure_skip_verify: true
- tls_handshake_timeout: 4s
- expect_continue_timeout: 5s
- max_idle_connections: 6
- max_idle_connections_per_host: 7
- max_connections_per_host: 8
-`,
- expectedConfig: Config{
- Endpoint: "test-endpoint",
- Region: "test-region",
- BucketName: "test-bucket-name",
- SecretAccessKey: flagext.SecretWithValue("test-secret-access-key"),
- AccessKeyID: "test-access-key-id",
- Insecure: true,
- SignatureVersion: "test-signature-version",
- StorageClass: "test-storage-class",
- DisableDualstack: true,
- SSE: SSEConfig{
- Type: "test-type",
- KMSKeyID: "test-kms-key-id",
- KMSEncryptionContext: "test-kms-encryption-context",
- },
- HTTP: HTTPConfig{
- Config: bucket_http.Config{
- IdleConnTimeout: 2 * time.Second,
- ResponseHeaderTimeout: 3 * time.Second,
- InsecureSkipVerify: true,
- TLSHandshakeTimeout: 4 * time.Second,
- ExpectContinueTimeout: 5 * time.Second,
- MaxIdleConns: 6,
- MaxIdleConnsPerHost: 7,
- MaxConnsPerHost: 8,
- },
- },
- },
- expectedErr: nil,
- },
- "invalid type": {
- config: `insecure: foo`,
- expectedConfig: defaultConfig,
- expectedErr: &yaml.TypeError{Errors: []string{"line 1: cannot unmarshal !!str `foo` into bool"}},
- },
- }
-
- for testName, testData := range tests {
- t.Run(testName, func(t *testing.T) {
- cfg := Config{}
- flagext.DefaultValues(&cfg)
-
- err := yaml.Unmarshal([]byte(testData.config), &cfg)
- require.Equal(t, testData.expectedErr, err)
- require.Equal(t, testData.expectedConfig, cfg)
- })
- }
-}
-
func TestSSEConfig_Validate(t *testing.T) {
tests := map[string]struct {
setup func() *SSEConfig
@@ -169,6 +65,85 @@ func TestSSEConfig_Validate(t *testing.T) {
}
}
+func TestConfig_Validate(t *testing.T) {
+ tests := map[string]struct {
+ setup func() *Config
+ expected error
+ }{
+ "should pass with default config": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ Endpoint: "s3.eu-central-1.amazonaws.com",
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ }
+ return cfg
+ },
+ },
+ "should fail if invalid storage class is set": {
+ setup: func() *Config {
+ return &Config{
+ StorageClass: "foo",
+ }
+ },
+ expected: errUnsupportedStorageClass,
+ },
+ "should fail on invalid endpoint prefix": {
+ setup: func() *Config {
+ return &Config{
+ Endpoint: "mimir-blocks.s3.eu-central-1.amazonaws.com",
+ BucketName: "mimir-blocks",
+ StorageClass: s3_service.StorageClassStandard,
+ }
+ },
+ expected: errInvalidEndpointPrefix,
+ },
+ "should pass if native_aws_auth_enabled is set": {
+ setup: func() *Config {
+ return &Config{
+ NativeAWSAuthEnabled: true,
+ }
+ },
+ },
+ "should pass with using sts endpoint": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ STSEndpoint: "https://sts.eu-central-1.amazonaws.com",
+ }
+ return cfg
+ },
+ },
+ "should not pass with using sts endpoint as its using an invalid url": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ STSEndpoint: "sts.eu-central-1.amazonaws.com",
+ }
+ return cfg
+ },
+ expected: errInvalidSTSEndpoint,
+ },
+ }
+
+ for testName, testData := range tests {
+ t.Run(testName, func(t *testing.T) {
+ assert.Equal(t, testData.expected, testData.setup().Validate())
+ })
+ }
+}
+
func TestSSEConfig_BuildMinioConfig(t *testing.T) {
tests := map[string]struct {
cfg *SSEConfig
@@ -225,31 +200,32 @@ func TestParseKMSEncryptionContext(t *testing.T) {
assert.Equal(t, expected, actual)
}
-func TestConfig_Validate(t *testing.T) {
- tests := map[string]struct {
- cfg Config
- expectedErr error
- }{
- "should fail if invalid signature version is set": {
- Config{SignatureVersion: "foo"},
- errUnsupportedSignatureVersion,
- },
- "should pass if valid signature version is set": {
- defaultConfig,
- nil,
- },
- "should fail if invalid storage class is set": {
- Config{SignatureVersion: SignatureVersionV4, StorageClass: "foo"},
- fmt.Errorf("unsupported S3 storage class: foo. Supported values: %s", strings.Join(aws.SupportedStorageClasses, ", ")),
- },
- "should pass if valid storage signature version is set": {
- Config{SignatureVersion: SignatureVersionV4, StorageClass: aws.StorageClassStandardInfrequentAccess},
- nil,
- },
- }
+func TestConfigParsesCredentialsInlineWithSessionToken(t *testing.T) {
+ var cfg = Config{}
+ yamlCfg := `
+access_key_id: access key id
+secret_access_key: secret access key
+session_token: session token
+`
+ err := yaml.Unmarshal([]byte(yamlCfg), &cfg)
+ require.NoError(t, err)
+
+ require.Equal(t, cfg.AccessKeyID, "access key id")
+ require.Equal(t, cfg.SecretAccessKey.String(), "secret access key")
+ require.Equal(t, cfg.SessionToken.String(), "session token")
+}
- for name, test := range tests {
- actual := test.cfg.Validate()
- assert.Equal(t, test.expectedErr, actual, name)
+func TestConfigRedactsCredentials(t *testing.T) {
+ cfg := Config{
+ AccessKeyID: "access key id",
+ SecretAccessKey: flagext.SecretWithValue("secret access key"),
+ SessionToken: flagext.SecretWithValue("session token"),
}
+
+ output, err := yaml.Marshal(cfg)
+ require.NoError(t, err)
+
+ require.True(t, bytes.Contains(output, []byte("access key id")))
+ require.False(t, bytes.Contains(output, []byte("secret access key")))
+ require.False(t, bytes.Contains(output, []byte("session token")))
}
diff --git a/pkg/storage/bucket/sse_bucket_client.go b/pkg/storage/bucket/sse_bucket_client.go
index 426522cfcfd1f..04c3d71a68e10 100644
--- a/pkg/storage/bucket/sse_bucket_client.go
+++ b/pkg/storage/bucket/sse_bucket_client.go
@@ -12,8 +12,8 @@ import (
"github.com/grafana/loki/v3/pkg/storage/bucket/s3"
)
-// TenantConfigProvider defines a per-tenant config provider.
-type TenantConfigProvider interface {
+// SSEConfigProvider defines a per-tenant SSE config provider.
+type SSEConfigProvider interface {
// S3SSEType returns the per-tenant S3 SSE type.
S3SSEType(userID string) string
@@ -29,11 +29,11 @@ type TenantConfigProvider interface {
type SSEBucketClient struct {
userID string
bucket objstore.Bucket
- cfgProvider TenantConfigProvider
+ cfgProvider SSEConfigProvider
}
// NewSSEBucketClient makes a new SSEBucketClient. The cfgProvider can be nil.
-func NewSSEBucketClient(userID string, bucket objstore.Bucket, cfgProvider TenantConfigProvider) *SSEBucketClient {
+func NewSSEBucketClient(userID string, bucket objstore.Bucket, cfgProvider SSEConfigProvider) *SSEBucketClient {
return &SSEBucketClient{
userID: userID,
bucket: bucket,
diff --git a/pkg/storage/bucket/swift/bucket_client.go b/pkg/storage/bucket/swift/bucket_client.go
index 4508f1ca7326d..b36c07e506b87 100644
--- a/pkg/storage/bucket/swift/bucket_client.go
+++ b/pkg/storage/bucket/swift/bucket_client.go
@@ -42,5 +42,5 @@ func NewBucketClient(cfg Config, _ string, logger log.Logger) (objstore.Bucket,
return nil, err
}
- return swift.NewContainer(logger, serialized)
+ return swift.NewContainer(logger, serialized, nil)
}
diff --git a/pkg/storage/bucket/user_bucket_client.go b/pkg/storage/bucket/user_bucket_client.go
index 14926a837b6f9..47fa996195585 100644
--- a/pkg/storage/bucket/user_bucket_client.go
+++ b/pkg/storage/bucket/user_bucket_client.go
@@ -6,7 +6,7 @@ import (
// NewUserBucketClient returns a bucket client to use to access the storage on behalf of the provided user.
// The cfgProvider can be nil.
-func NewUserBucketClient(userID string, bucket objstore.Bucket, cfgProvider TenantConfigProvider) objstore.InstrumentedBucket {
+func NewUserBucketClient(userID string, bucket objstore.Bucket, cfgProvider SSEConfigProvider) objstore.InstrumentedBucket {
// Inject the user/tenant prefix.
bucket = NewPrefixedBucketClient(bucket, userID)
diff --git a/pkg/storage/chunk/cache/memcached_client.go b/pkg/storage/chunk/cache/memcached_client.go
index 995e896fbcfee..ffdc817b68b42 100644
--- a/pkg/storage/chunk/cache/memcached_client.go
+++ b/pkg/storage/chunk/cache/memcached_client.go
@@ -114,7 +114,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description st
func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer, logger log.Logger, metricsNamespace string) MemcachedClient {
var selector serverSelector
if cfg.ConsistentHash {
- selector = jumphash.DefaultSelector()
+ selector = jumphash.DefaultSelector("memcached")
} else {
selector = &memcache.ServerList{}
}
diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go
index 0dfc4d49aae0a..974036f940e0c 100644
--- a/pkg/storage/chunk/cache/resultscache/cache.go
+++ b/pkg/storage/chunk/cache/resultscache/cache.go
@@ -289,10 +289,10 @@ func merge(extents []Extent, acc *accumulator) ([]Extent, error) {
return nil, err
}
return append(extents, Extent{
- Start: acc.Extent.Start,
- End: acc.Extent.End,
+ Start: acc.Start,
+ End: acc.End,
Response: anyResp,
- TraceId: acc.Extent.TraceId,
+ TraceId: acc.TraceId,
}), nil
}
@@ -386,7 +386,7 @@ func (s ResultsCache) partition(req Request, extents []Extent) ([]Request, []Res
// If start and end are the same (valid in promql), start == req.GetEnd() and we won't do the query.
// But we should only do the request if we don't have a valid cached response for it.
- if req.GetStart() == req.GetEnd() && len(cachedResponses) == 0 {
+ if req.GetStart().Equal(req.GetEnd()) && len(cachedResponses) == 0 {
requests = append(requests, req)
}
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index 9ab8c9116339f..65817f38c9d9f 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -563,7 +563,7 @@ func isContextErr(err error) bool {
}
// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts.
-func (a *S3ObjectClient) IsStorageTimeoutErr(err error) bool {
+func IsStorageTimeoutErr(err error) bool {
// TODO(dannyk): move these out to be generic
// context errors are all client-side
if isContextErr(err) {
@@ -599,7 +599,7 @@ func (a *S3ObjectClient) IsStorageTimeoutErr(err error) bool {
}
// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling.
-func (a *S3ObjectClient) IsStorageThrottledErr(err error) bool {
+func IsStorageThrottledErr(err error) bool {
if rerr, ok := err.(awserr.RequestFailure); ok {
// https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
@@ -609,6 +609,11 @@ func (a *S3ObjectClient) IsStorageThrottledErr(err error) bool {
return false
}
+
+func IsRetryableErr(err error) bool {
+ return IsStorageTimeoutErr(err) || IsStorageThrottledErr(err)
+}
+
func (a *S3ObjectClient) IsRetryableErr(err error) bool {
- return a.IsStorageTimeoutErr(err) || a.IsStorageThrottledErr(err)
+ return IsRetryableErr(err)
}
diff --git a/pkg/storage/chunk/client/aws/s3_thanos_object_client.go b/pkg/storage/chunk/client/aws/s3_thanos_object_client.go
new file mode 100644
index 0000000000000..e00ded920d552
--- /dev/null
+++ b/pkg/storage/chunk/client/aws/s3_thanos_object_client.go
@@ -0,0 +1,44 @@
+package aws
+
+import (
+ "context"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+)
+
+func NewS3ThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newS3ThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newS3ThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ o := bucket.NewObjectClientAdapter(b, hedged, logger, bucket.WithRetryableErrFunc(IsRetryableErr))
+ return o, nil
+}
+
+func newS3ThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
+ if hedging {
+ hedgedTransport, err := hedgingCfg.RoundTripperWithRegisterer(nil, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+ if err != nil {
+ return nil, err
+ }
+
+ cfg.S3.HTTP.Transport = hedgedTransport
+ }
+
+ return bucket.NewClient(ctx, bucket.S3, cfg, component, logger)
+}
diff --git a/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go b/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go
new file mode 100644
index 0000000000000..4bf2137433064
--- /dev/null
+++ b/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go
@@ -0,0 +1,44 @@
+package azure
+
+import (
+ "context"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+)
+
+// NewBlobStorageObjectClient makes a new BlobStorage-backed ObjectClient.
+func NewBlobStorageThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newBlobStorageThanosObjClient(ctx, cfg, component, logger, false, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newBlobStorageThanosObjClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return bucket.NewObjectClientAdapter(b, hedged, logger), nil
+}
+
+func newBlobStorageThanosObjClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
+ if hedging {
+ hedgedTransport, err := hedgingCfg.RoundTripperWithRegisterer(nil, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+ if err != nil {
+ return nil, err
+ }
+
+ cfg.Azure.Transport = hedgedTransport
+ }
+
+ return bucket.NewClient(ctx, bucket.Azure, cfg, component, logger)
+}
diff --git a/pkg/storage/chunk/client/gcp/fixtures.go b/pkg/storage/chunk/client/gcp/fixtures.go
index 3fc03fb6e0158..a9ab2d0afb127 100644
--- a/pkg/storage/chunk/client/gcp/fixtures.go
+++ b/pkg/storage/chunk/client/gcp/fixtures.go
@@ -49,19 +49,11 @@ func (f *fixture) Clients() (
}
f.gcssrv = fakestorage.NewServer(nil)
- /*
- // Note: fake-gcs-server upgrade does not work in the `dist` tooling builds.
- // Leave at v1.7.0 until the issue is resolved.
- // Example failure: https://github.com/grafana/loki/actions/runs/10744853958/job/29802951861
- // Open issue: https://github.com/fsouza/fake-gcs-server/issues/1739
- // Once the issue is resolved, this code block can be used to replace the
- // `CreateBucket` call below.
- opts := fakestorage.CreateBucketOpts{
- Name: "chunks",
- }
- f.gcssrv.CreateBucketWithOpts(opts)
- */
- f.gcssrv.CreateBucket("chunks")
+
+ opts := fakestorage.CreateBucketOpts{
+ Name: "chunks",
+ }
+ f.gcssrv.CreateBucketWithOpts(opts)
conn, err := grpc.NewClient(f.btsrv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client.go b/pkg/storage/chunk/client/gcp/gcs_object_client.go
index 9b05b57404c49..1d44659b3f3cc 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client.go
@@ -279,7 +279,7 @@ func isContextErr(err error) bool {
}
// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts.
-func (s *GCSObjectClient) IsStorageTimeoutErr(err error) bool {
+func IsStorageTimeoutErr(err error) bool {
// TODO(dannyk): move these out to be generic
// context errors are all client-side
if isContextErr(err) {
@@ -315,7 +315,7 @@ func (s *GCSObjectClient) IsStorageTimeoutErr(err error) bool {
}
// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling.
-func (s *GCSObjectClient) IsStorageThrottledErr(err error) bool {
+func IsStorageThrottledErr(err error) bool {
if gerr, ok := err.(*googleapi.Error); ok {
// https://cloud.google.com/storage/docs/retry-strategy
return gerr.Code == http.StatusTooManyRequests ||
@@ -325,9 +325,14 @@ func (s *GCSObjectClient) IsStorageThrottledErr(err error) bool {
return false
}
+// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
+func IsRetryableErr(err error) bool {
+ return IsStorageTimeoutErr(err) || IsStorageThrottledErr(err)
+}
+
// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
func (s *GCSObjectClient) IsRetryableErr(err error) bool {
- return s.IsStorageTimeoutErr(err) || s.IsStorageThrottledErr(err)
+ return IsRetryableErr(err)
}
func gcsTransport(ctx context.Context, scope string, insecure bool, http2 bool, serviceAccount flagext.Secret) (http.RoundTripper, error) {
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
index c885c4c1d780c..a0e6313f7ce43 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
@@ -147,8 +147,8 @@ func TestUpstreamRetryableErrs(t *testing.T) {
require.NoError(t, err)
_, _, err = cli.GetObject(ctx, "foo")
- require.Equal(t, tc.isThrottledErr, cli.IsStorageThrottledErr(err))
- require.Equal(t, tc.isTimeoutErr, cli.IsStorageTimeoutErr(err))
+ require.Equal(t, tc.isThrottledErr, IsStorageThrottledErr(err))
+ require.Equal(t, tc.isTimeoutErr, IsStorageTimeoutErr(err))
})
}
}
@@ -229,7 +229,7 @@ func TestTCPErrs(t *testing.T) {
_, _, err = cli.GetObject(ctx, "foo")
require.Error(t, err)
- require.Equal(t, tc.retryable, cli.IsStorageTimeoutErr(err))
+ require.Equal(t, tc.retryable, IsStorageTimeoutErr(err))
})
}
}
diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go
new file mode 100644
index 0000000000000..b4190be2d6943
--- /dev/null
+++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go
@@ -0,0 +1,44 @@
+package gcp
+
+import (
+ "context"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+)
+
+func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newGCSThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newGCSThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ o := bucket.NewObjectClientAdapter(b, hedged, logger, bucket.WithRetryableErrFunc(IsRetryableErr))
+ return o, nil
+}
+
+func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
+ if hedging {
+ hedgedTransport, err := hedgingCfg.RoundTripperWithRegisterer(nil, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+ if err != nil {
+ return nil, err
+ }
+
+ cfg.GCS.Transport = hedgedTransport
+ }
+
+ return bucket.NewClient(ctx, bucket.GCS, cfg, component, logger)
+}
diff --git a/pkg/storage/chunk/client/object_client.go b/pkg/storage/chunk/client/object_client.go
index 41026486c55e9..ba1d5be0120be 100644
--- a/pkg/storage/chunk/client/object_client.go
+++ b/pkg/storage/chunk/client/object_client.go
@@ -183,6 +183,12 @@ func (o *client) getChunk(ctx context.Context, decodeContext *chunk.DecodeContex
}
defer readCloser.Close()
+ // reset if the size is unknown
+ // start with a buf of size bytes.MinRead since we cannot avoid allocations
+ if size < 0 {
+ size = 0
+ }
+
// adds bytes.MinRead to avoid allocations when the size is known.
// This is because ReadFrom reads bytes.MinRead by bytes.MinRead.
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go
index 01066d69ad5f6..7f4046a47d868 100644
--- a/pkg/storage/factory.go
+++ b/pkg/storage/factory.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/dskit/flagext"
"github.com/grafana/loki/v3/pkg/indexgateway"
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/alibaba"
@@ -40,6 +41,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
+ util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var (
@@ -293,6 +295,9 @@ type Config struct {
DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"`
MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"`
+ UseThanosObjstore bool `yaml:"use_thanos_objstore" doc:"hidden"`
+ ObjectStore bucket.Config `yaml:"object_store" doc:"hidden"`
+
MaxChunkBatchSize int `yaml:"max_chunk_batch_size"`
BoltDBShipperConfig boltdb.IndexCfg `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. Required fields only required when boltdb-shipper is defined in config."`
TSDBShipperConfig indexshipper.Config `yaml:"tsdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in a prometheus TSDB-like format. Required fields only required when TSDB is defined in config."`
@@ -320,6 +325,9 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Hedging.RegisterFlagsWithPrefix("store.", f)
cfg.CongestionControl.RegisterFlagsWithPrefix("store.", f)
+ f.BoolVar(&cfg.UseThanosObjstore, "use-thanos-objstore", false, "Enables the use of thanos-io/objstore clients for connecting to object storage. When set to true, the configuration inside `storage_config.object_store` or `common.storage.object_store` block takes effect.")
+ cfg.ObjectStore.RegisterFlagsWithPrefix("object-store.", f)
+
cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "", f)
f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.")
f.StringVar(&cfg.ObjectPrefix, "store.object-prefix", "", "The prefix to all keys inserted in object storage. Example: loki-instances/west/")
@@ -357,12 +365,15 @@ func (cfg *Config) Validate() error {
if err := cfg.BloomShipperConfig.Validate(); err != nil {
return errors.Wrap(err, "invalid bloom shipper config")
}
+ if err := cfg.ObjectStore.Validate(); err != nil {
+ return errors.Wrap(err, "invalid object store config")
+ }
return cfg.NamedStores.Validate()
}
// NewIndexClient creates a new index client of the desired type specified in the PeriodConfig
-func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, shardingStrategy indexgateway.ShardingStrategy, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (index.Client, error) {
+func NewIndexClient(component string, periodCfg config.PeriodConfig, tableRange config.TableRange, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, shardingStrategy indexgateway.ShardingStrategy, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (index.Client, error) {
switch true {
case util.StringsContain(types.TestingStorageTypes, periodCfg.IndexType):
@@ -393,7 +404,7 @@ func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange,
return client, nil
}
- objectClient, err := NewObjectClient(periodCfg.ObjectType, cfg, cm)
+ objectClient, err := NewObjectClient(periodCfg.ObjectType, component, cfg, cm)
if err != nil {
return nil, err
}
@@ -454,7 +465,7 @@ func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange,
}
// NewChunkClient makes a new chunk.Client of the desired types.
-func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc congestion.Controller, registerer prometheus.Registerer, clientMetrics ClientMetrics, logger log.Logger) (client.Client, error) {
+func NewChunkClient(name, component string, cfg Config, schemaCfg config.SchemaConfig, cc congestion.Controller, registerer prometheus.Registerer, clientMetrics ClientMetrics, logger log.Logger) (client.Client, error) {
var storeType = name
// lookup storeType for named stores
@@ -467,7 +478,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c
case util.StringsContain(types.TestingStorageTypes, storeType):
switch storeType {
case types.StorageTypeInMemory:
- c, err := NewObjectClient(name, cfg, clientMetrics)
+ c, err := NewObjectClient(name, component, cfg, clientMetrics)
if err != nil {
return nil, err
}
@@ -477,14 +488,14 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c
case util.StringsContain(types.SupportedStorageTypes, storeType):
switch storeType {
case types.StorageTypeFileSystem:
- c, err := NewObjectClient(name, cfg, clientMetrics)
+ c, err := NewObjectClient(name, component, cfg, clientMetrics)
if err != nil {
return nil, err
}
return client.NewClientWithMaxParallel(c, client.FSEncoder, cfg.MaxParallelGetChunk, schemaCfg), nil
case types.StorageTypeAWS, types.StorageTypeS3, types.StorageTypeAzure, types.StorageTypeBOS, types.StorageTypeSwift, types.StorageTypeCOS, types.StorageTypeAlibabaCloud:
- c, err := NewObjectClient(name, cfg, clientMetrics)
+ c, err := NewObjectClient(name, component, cfg, clientMetrics)
if err != nil {
return nil, err
}
@@ -494,7 +505,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c
return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil
case types.StorageTypeGCS:
- c, err := NewObjectClient(name, cfg, clientMetrics)
+ c, err := NewObjectClient(name, component, cfg, clientMetrics)
if err != nil {
return nil, err
}
@@ -535,7 +546,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c
}
// NewTableClient makes a new table client based on the configuration.
-func NewTableClient(name string, periodCfg config.PeriodConfig, cfg Config, cm ClientMetrics, registerer prometheus.Registerer, logger log.Logger) (index.TableClient, error) {
+func NewTableClient(name, component string, periodCfg config.PeriodConfig, cfg Config, cm ClientMetrics, registerer prometheus.Registerer, logger log.Logger) (index.TableClient, error) {
switch true {
case util.StringsContain(types.TestingStorageTypes, name):
switch name {
@@ -544,7 +555,7 @@ func NewTableClient(name string, periodCfg config.PeriodConfig, cfg Config, cm C
}
case util.StringsContain(types.SupportedIndexTypes, name):
- objectClient, err := NewObjectClient(periodCfg.ObjectType, cfg, cm)
+ objectClient, err := NewObjectClient(periodCfg.ObjectType, component, cfg, cm)
if err != nil {
return nil, err
}
@@ -599,13 +610,13 @@ func (c *ClientMetrics) Unregister() {
}
// NewObjectClient makes a new StorageClient with the prefix in the front.
-func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) {
- actual, err := internalNewObjectClient(name, cfg, clientMetrics)
+func NewObjectClient(name, component string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) {
+ actual, err := internalNewObjectClient(name, component, cfg, clientMetrics)
if err != nil {
return nil, err
}
- if cfg.ObjectPrefix == "" {
+ if cfg.UseThanosObjstore || cfg.ObjectPrefix == "" {
return actual, nil
} else {
prefix := strings.Trim(cfg.ObjectPrefix, "/") + "/"
@@ -614,16 +625,16 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie
}
// internalNewObjectClient makes the underlying StorageClient of the desired types.
-func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) {
+func internalNewObjectClient(storeName, component string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) {
var (
namedStore string
- storeType = name
+ storeType = storeName
)
// lookup storeType for named stores
- if nsType, ok := cfg.NamedStores.storeType[name]; ok {
+ if nsType, ok := cfg.NamedStores.storeType[storeName]; ok {
storeType = nsType
- namedStore = name
+ namedStore = storeName
}
switch storeType {
@@ -635,10 +646,18 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
awsCfg, ok := cfg.NamedStores.AWS[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named aws storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named aws storage config %s", storeName)
}
s3Cfg = awsCfg.S3Config
}
+
+ if cfg.CongestionControl.Enabled {
+ s3Cfg.BackoffConfig.MaxRetries = 1
+ }
+
+ if cfg.UseThanosObjstore {
+ return aws.NewS3ThanosObjectClient(context.Background(), cfg.ObjectStore, component, util_log.Logger, cfg.Hedging)
+ }
return aws.NewS3ObjectClient(s3Cfg, cfg.Hedging)
case types.StorageTypeAlibabaCloud:
@@ -646,7 +665,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.AlibabaCloud[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named alibabacloud oss storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named alibabacloud oss storage config %s", storeName)
}
ossCfg = (alibaba.OssConfig)(nsCfg)
@@ -658,7 +677,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.GCS[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named gcs storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named gcs storage config %s", storeName)
}
gcsCfg = (gcp.GCSConfig)(nsCfg)
}
@@ -668,6 +687,9 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if cfg.CongestionControl.Enabled {
gcsCfg.EnableRetries = false
}
+ if cfg.UseThanosObjstore {
+ return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjectStore, component, util_log.Logger, cfg.Hedging)
+ }
return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging)
case types.StorageTypeAzure:
@@ -675,10 +697,13 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.Azure[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named azure storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named azure storage config %s", storeName)
}
azureCfg = (azure.BlobStorageConfig)(nsCfg)
}
+ if cfg.UseThanosObjstore {
+ return azure.NewBlobStorageThanosObjectClient(context.Background(), cfg.ObjectStore, component, util_log.Logger, cfg.Hedging)
+ }
return azure.NewBlobStorage(&azureCfg, clientMetrics.AzureMetrics, cfg.Hedging)
case types.StorageTypeSwift:
@@ -686,7 +711,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.Swift[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named swift storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named swift storage config %s", storeName)
}
swiftCfg = (openstack.SwiftConfig)(nsCfg)
}
@@ -697,7 +722,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.Filesystem[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named filesystem storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named filesystem storage config %s", storeName)
}
fsCfg = (local.FSConfig)(nsCfg)
}
@@ -708,7 +733,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.BOS[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named bos storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named bos storage config %s", storeName)
}
bosCfg = (baidubce.BOSStorageConfig)(nsCfg)
@@ -720,7 +745,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
if namedStore != "" {
nsCfg, ok := cfg.NamedStores.COS[namedStore]
if !ok {
- return nil, fmt.Errorf("Unrecognized named cos storage config %s", name)
+ return nil, fmt.Errorf("Unrecognized named cos storage config %s", storeName)
}
cosCfg = (ibmcloud.COSConfig)(nsCfg)
@@ -728,6 +753,10 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric
return ibmcloud.NewCOSObjectClient(cosCfg, cfg.Hedging)
default:
- return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v, %v, %v", name, types.StorageTypeAWS, types.StorageTypeS3, types.StorageTypeGCS, types.StorageTypeAzure, types.StorageTypeAlibabaCloud, types.StorageTypeSwift, types.StorageTypeBOS, types.StorageTypeCOS, types.StorageTypeFileSystem)
+ if cfg.UseThanosObjstore {
+ return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %s", storeName, strings.Join(cfg.ObjectStore.SupportedBackends(), ", "))
+ }
+
+ return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v, %v, %v, %v", storeName, types.StorageTypeAWS, types.StorageTypeS3, types.StorageTypeGCS, types.StorageTypeAzure, types.StorageTypeAlibabaCloud, types.StorageTypeSwift, types.StorageTypeBOS, types.StorageTypeCOS, types.StorageTypeFileSystem)
}
}
diff --git a/pkg/storage/factory_test.go b/pkg/storage/factory_test.go
index 3ac8658e8d6b2..52cd6ba749a86 100644
--- a/pkg/storage/factory_test.go
+++ b/pkg/storage/factory_test.go
@@ -234,7 +234,7 @@ func TestNewObjectClient_prefixing(t *testing.T) {
var cfg Config
flagext.DefaultValues(&cfg)
- objectClient, err := NewObjectClient("inmemory", cfg, cm)
+ objectClient, err := NewObjectClient("inmemory", "test", cfg, cm)
require.NoError(t, err)
_, ok := objectClient.(client.PrefixedObjectClient)
@@ -246,7 +246,7 @@ func TestNewObjectClient_prefixing(t *testing.T) {
flagext.DefaultValues(&cfg)
cfg.ObjectPrefix = "my/prefix/"
- objectClient, err := NewObjectClient("inmemory", cfg, cm)
+ objectClient, err := NewObjectClient("inmemory", "test", cfg, cm)
require.NoError(t, err)
prefixed, ok := objectClient.(client.PrefixedObjectClient)
@@ -259,7 +259,7 @@ func TestNewObjectClient_prefixing(t *testing.T) {
flagext.DefaultValues(&cfg)
cfg.ObjectPrefix = "my/prefix"
- objectClient, err := NewObjectClient("inmemory", cfg, cm)
+ objectClient, err := NewObjectClient("inmemory", "test", cfg, cm)
require.NoError(t, err)
prefixed, ok := objectClient.(client.PrefixedObjectClient)
@@ -272,7 +272,7 @@ func TestNewObjectClient_prefixing(t *testing.T) {
flagext.DefaultValues(&cfg)
cfg.ObjectPrefix = "/my/prefix/"
- objectClient, err := NewObjectClient("inmemory", cfg, cm)
+ objectClient, err := NewObjectClient("inmemory", "test", cfg, cm)
require.NoError(t, err)
prefixed, ok := objectClient.(client.PrefixedObjectClient)
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index 768708a24f34c..a8e6a1add3239 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -228,8 +228,6 @@ func (s *LokiStore) chunkClientForPeriod(p config.PeriodConfig) (client.Client,
if objectStoreType == "" {
objectStoreType = p.IndexType
}
- chunkClientReg := prometheus.WrapRegistererWith(
- prometheus.Labels{"component": "chunk-store-" + p.From.String()}, s.registerer)
var cc congestion.Controller
ccCfg := s.cfg.CongestionControl
@@ -242,7 +240,10 @@ func (s *LokiStore) chunkClientForPeriod(p config.PeriodConfig) (client.Client,
)
}
- chunks, err := NewChunkClient(objectStoreType, s.cfg, s.schemaCfg, cc, chunkClientReg, s.clientMetrics, s.logger)
+ component := "chunk-store-" + p.From.String()
+ chunkClientReg := prometheus.WrapRegistererWith(
+ prometheus.Labels{"component": component}, s.registerer)
+ chunks, err := NewChunkClient(objectStoreType, component, s.cfg, s.schemaCfg, cc, chunkClientReg, s.clientMetrics, s.logger)
if err != nil {
return nil, errors.Wrap(err, "error creating object client")
}
@@ -265,14 +266,8 @@ func shouldUseIndexGatewayClient(cfg indexshipper.Config) bool {
}
func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.TableRange, chunkClient client.Client, f *fetcher.Fetcher) (stores.ChunkWriter, index.ReaderWriter, func(), error) {
- indexClientReg := prometheus.WrapRegistererWith(
- prometheus.Labels{
- "component": fmt.Sprintf(
- "index-store-%s-%s",
- p.IndexType,
- p.From.String(),
- ),
- }, s.registerer)
+ component := fmt.Sprintf("index-store-%s-%s", p.IndexType, p.From.String())
+ indexClientReg := prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, s.registerer)
indexClientLogger := log.With(s.logger, "index-store", fmt.Sprintf("%s-%s", p.IndexType, p.From.String()))
if p.IndexType == types.TSDBType {
@@ -290,7 +285,7 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl
}, nil
}
- objectClient, err := NewObjectClient(p.ObjectType, s.cfg, s.clientMetrics)
+ objectClient, err := NewObjectClient(p.ObjectType, component, s.cfg, s.clientMetrics)
if err != nil {
return nil, nil, nil, err
}
@@ -313,7 +308,7 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl
}, nil
}
- idx, err := NewIndexClient(p, tableRange, s.cfg, s.schemaCfg, s.limits, s.clientMetrics, nil, indexClientReg, indexClientLogger, s.metricsNamespace)
+ idx, err := NewIndexClient(component, p, tableRange, s.cfg, s.schemaCfg, s.limits, s.clientMetrics, nil, indexClientReg, indexClientLogger, s.metricsNamespace)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "error creating index client")
}
diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go
index 0202494ae6e1d..c85643a9b4342 100644
--- a/pkg/storage/stores/series/series_index_gateway_store.go
+++ b/pkg/storage/stores/series/series_index_gateway_store.go
@@ -11,6 +11,7 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+ statscontext "github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
@@ -58,6 +59,10 @@ func (c *IndexGatewayClientStore) GetChunkRefs(ctx context.Context, _ string, fr
result[i] = *ref
}
+ statsCtx := statscontext.FromContext(ctx)
+ statsCtx.AddIndexTotalChunkRefs(response.Stats.TotalChunks)
+ statsCtx.AddIndexPostFilterChunkRefs(response.Stats.PostFilterChunks)
+
return result, nil
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 1c66e500a6b9c..627016c63c025 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -225,13 +225,16 @@ func newBlockRefWithEncoding(ref Ref, enc compression.Codec) BlockRef {
}
func BlockFrom(enc compression.Codec, tenant, table string, blk *v1.Block) (Block, error) {
- md, _ := blk.Metadata()
+ md, err := blk.Metadata()
+ if err != nil {
+ return Block{}, errors.Wrap(err, "decoding index")
+ }
+
ref := newBlockRefWithEncoding(newRefFrom(tenant, table, md), enc)
// TODO(owen-d): pool
buf := bytes.NewBuffer(nil)
- err := v1.TarCompress(ref.Codec, buf, blk.Reader())
-
+ err = v1.TarCompress(ref.Codec, buf, blk.Reader())
if err != nil {
return Block{}, err
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index b486b7ca8e524..f6a4b7f0b06c1 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -334,7 +334,7 @@ func NewBloomStore(
}
for _, periodicConfig := range periodicConfigs {
- objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics)
+ objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, "bloomshipper", storageConfig, clientMetrics)
if err != nil {
return nil, errors.Wrapf(err, "creating object client for period %s", periodicConfig.From)
}
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go
index 38bd567ff4a3a..c4596f66cb4b6 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go
@@ -128,7 +128,7 @@ type testObjectClient struct {
}
func newTestObjectClient(path string, clientMetrics storage.ClientMetrics) client.ObjectClient {
- c, err := storage.NewObjectClient("filesystem", storage.Config{
+ c, err := storage.NewObjectClient("filesystem", "test", storage.Config{
FSConfig: local.FSConfig{
Directory: path,
},
diff --git a/pkg/tool/audit/audit.go b/pkg/tool/audit/audit.go
index 63bbaab017960..017a3a9e93b42 100644
--- a/pkg/tool/audit/audit.go
+++ b/pkg/tool/audit/audit.go
@@ -17,7 +17,6 @@ import (
"github.com/grafana/loki/v3/pkg/compactor"
"github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/storage"
- loki_storage "github.com/grafana/loki/v3/pkg/storage"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
indexshipper_storage "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
shipperutil "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
@@ -53,8 +52,7 @@ func Run(ctx context.Context, cloudIndexPath, table string, cfg Config, logger l
func GetObjectClient(cfg Config) (client.ObjectClient, error) {
periodCfg := cfg.SchemaConfig.Configs[len(cfg.SchemaConfig.Configs)-1] // only check the last period.
-
- objClient, err := loki_storage.NewObjectClient(periodCfg.ObjectType, cfg.StorageConfig, storage.NewClientMetrics())
+ objClient, err := storage.NewObjectClient(periodCfg.ObjectType, "tool-audit", cfg.StorageConfig, storage.NewClientMetrics())
if err != nil {
return nil, fmt.Errorf("couldn't create object client: %w", err)
}
diff --git a/pkg/util/entry_size.go b/pkg/util/entry_size.go
new file mode 100644
index 0000000000000..b3416f3b9f7d1
--- /dev/null
+++ b/pkg/util/entry_size.go
@@ -0,0 +1,34 @@
+package util
+
+import (
+ "golang.org/x/exp/slices"
+
+ "github.com/grafana/loki/pkg/push"
+
+ "github.com/grafana/loki/v3/pkg/util/constants"
+)
+
+func EntriesTotalSize(entries []push.Entry) int {
+ size := 0
+ for _, entry := range entries {
+ size += EntryTotalSize(&entry)
+ }
+ return size
+}
+
+func EntryTotalSize(entry *push.Entry) int {
+ return len(entry.Line) + StructuredMetadataSize(entry.StructuredMetadata)
+}
+
+var excludedStructuredMetadataLabels = []string{constants.LevelLabel}
+
+func StructuredMetadataSize(metas push.LabelsAdapter) int {
+ size := 0
+ for _, meta := range metas {
+ if slices.Contains(excludedStructuredMetadataLabels, meta.Name) {
+ continue
+ }
+ size += len(meta.Name) + len(meta.Value)
+ }
+ return size
+}
diff --git a/pkg/util/http.go b/pkg/util/http.go
index c3c64ea1e3a86..3fdfca6df24f1 100644
--- a/pkg/util/http.go
+++ b/pkg/util/http.go
@@ -298,3 +298,12 @@ func FlagFromValues(values url.Values, key string, d bool) bool {
return d
}
}
+
+func IsValidURL(endpoint string) bool {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return false
+ }
+
+ return u.Scheme != "" && u.Host != ""
+}
diff --git a/pkg/util/jumphash/memcached_client_selector.go b/pkg/util/jumphash/memcached_client_selector.go
index ccec90fa0dda2..7eec90a3de706 100644
--- a/pkg/util/jumphash/memcached_client_selector.go
+++ b/pkg/util/jumphash/memcached_client_selector.go
@@ -7,6 +7,7 @@ import (
"github.com/cespare/xxhash"
"github.com/facette/natsort"
+ "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/gomemcache/memcache"
@@ -23,6 +24,7 @@ import (
// with consistent DNS names where the naturally sorted order
// is predictable.
type Selector struct {
+ logger log.Logger
mu sync.RWMutex
addrs []net.Addr
resolveUnixAddr UnixResolver
@@ -33,15 +35,17 @@ type UnixResolver func(network, address string) (*net.UnixAddr, error)
type TCPResolver func(network, address string) (*net.TCPAddr, error)
-func NewSelector(resolveUnixAddr UnixResolver, resolveTCPAddr TCPResolver) *Selector {
+func NewSelector(name string, resolveUnixAddr UnixResolver, resolveTCPAddr TCPResolver) *Selector {
return &Selector{
+ logger: log.With(util_log.Logger, "name", name),
resolveUnixAddr: resolveUnixAddr,
resolveTCPAddr: resolveTCPAddr,
}
}
-func DefaultSelector() *Selector {
+func DefaultSelector(name string) *Selector {
return &Selector{
+ logger: log.With(util_log.Logger, "name", name),
resolveUnixAddr: net.ResolveUnixAddr,
resolveTCPAddr: net.ResolveTCPAddr,
}
@@ -102,7 +106,7 @@ func (s *Selector) SetServers(servers ...string) error {
}
}
- level.Debug(util_log.Logger).Log("msg", "updating memcached servers", "servers", strings.Join(addresses(naddrs), ","), "count", len(naddrs))
+ level.Debug(util_log.Logger).Log("msg", "updating servers", "servers", strings.Join(addresses(naddrs), ","), "count", len(naddrs))
s.mu.Lock()
defer s.mu.Unlock()
diff --git a/pkg/util/jumphash/memcached_client_selector_test.go b/pkg/util/jumphash/memcached_client_selector_test.go
index 939106ad5aac8..06beca0f8800c 100644
--- a/pkg/util/jumphash/memcached_client_selector_test.go
+++ b/pkg/util/jumphash/memcached_client_selector_test.go
@@ -57,6 +57,7 @@ var mockTCPResolver = func(_, address string) (*net.TCPAddr, error) {
func TestMemcachedJumpHashSelector_PickSever(t *testing.T) {
s := NewSelector(
+ "test",
mockUnixResolver,
mockTCPResolver,
)
@@ -84,6 +85,7 @@ func TestMemcachedJumpHashSelector_PickSever(t *testing.T) {
func TestMemcachedJumpHashSelector_PickSever_ErrNoServers(t *testing.T) {
s := NewSelector(
+ "test",
mockUnixResolver,
mockTCPResolver,
)
diff --git a/pkg/util/limiter/combined_limits.go b/pkg/util/limiter/combined_limits.go
index 3ea2a230634e8..5c98c6bf9383d 100644
--- a/pkg/util/limiter/combined_limits.go
+++ b/pkg/util/limiter/combined_limits.go
@@ -8,11 +8,13 @@ import (
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/indexgateway"
"github.com/grafana/loki/v3/pkg/ingester"
+ "github.com/grafana/loki/v3/pkg/pattern"
querier_limits "github.com/grafana/loki/v3/pkg/querier/limits"
queryrange_limits "github.com/grafana/loki/v3/pkg/querier/queryrange/limits"
"github.com/grafana/loki/v3/pkg/ruler"
scheduler_limits "github.com/grafana/loki/v3/pkg/scheduler/limits"
"github.com/grafana/loki/v3/pkg/storage"
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
)
type CombinedLimits interface {
@@ -28,4 +30,6 @@ type CombinedLimits interface {
bloomgateway.Limits
bloomplanner.Limits
bloombuilder.Limits
+ pattern.Limits
+ bucket.SSEConfigProvider
}
diff --git a/pkg/util/marshal/query.go b/pkg/util/marshal/query.go
index 7f06cd6ebe366..cbf3b40d94856 100644
--- a/pkg/util/marshal/query.go
+++ b/pkg/util/marshal/query.go
@@ -1,8 +1,11 @@
package marshal
import (
+ "bytes"
"fmt"
"strconv"
+ "strings"
+ "unicode/utf8"
"unsafe"
jsoniter "github.com/json-iterator/go"
@@ -20,6 +23,16 @@ import (
"github.com/grafana/loki/v3/pkg/util/httpreq"
)
+var (
+ // The rune error replacement is rejected by Prometheus hence replacing them with space.
+ removeInvalidUtf = func(r rune) rune {
+ if r == utf8.RuneError {
+ return 32 // rune value for space
+ }
+ return r
+ }
+)
+
// NewResultValue constructs a ResultValue from a promql.Value
func NewResultValue(v parser.Value) (loghttp.ResultValue, error) {
var err error
@@ -77,6 +90,9 @@ func NewStreams(s logqlmodel.Streams) (loghttp.Streams, error) {
ret := make([]loghttp.Stream, len(s))
for i, stream := range s {
+ if strings.ContainsRune(stream.Labels, utf8.RuneError) {
+ stream.Labels = string(bytes.Map(removeInvalidUtf, []byte(stream.Labels)))
+ }
ret[i], err = NewStream(stream)
if err != nil {
diff --git a/pkg/util/marshal/query_test.go b/pkg/util/marshal/query_test.go
new file mode 100644
index 0000000000000..15764760c243f
--- /dev/null
+++ b/pkg/util/marshal/query_test.go
@@ -0,0 +1,19 @@
+package marshal
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/logqlmodel"
+)
+
+func TestNewStreams(t *testing.T) {
+ s, err := NewStreams(logqlmodel.Streams{
+ {
+ Labels: "{asdf=\"�\"}",
+ },
+ })
+ require.NoError(t, err)
+ require.Equal(t, " ", s[0].Labels["asdf"], "expected only a space for label who only contained invalid UTF8 rune")
+}
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 5da6bc9cfc61d..215ace5c11cd4 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -61,6 +61,7 @@ const (
defaultMaxStructuredMetadataCount = 128
defaultBloomBuildMaxBlockSize = "200MB"
defaultBloomBuildMaxBloomSize = "128MB"
+ defaultBloomTaskTargetChunkSize = "20GB"
defaultBlockedIngestionStatusCode = 260 // 260 is a custom status code to indicate blocked ingestion
)
@@ -207,9 +208,11 @@ type Limits struct {
BloomBuildTaskMaxRetries int `yaml:"bloom_build_task_max_retries" json:"bloom_build_task_max_retries" category:"experimental"`
BloomBuilderResponseTimeout time.Duration `yaml:"bloom_build_builder_response_timeout" json:"bloom_build_builder_response_timeout" category:"experimental"`
- BloomCreationEnabled bool `yaml:"bloom_creation_enabled" json:"bloom_creation_enabled" category:"experimental"`
- BloomSplitSeriesKeyspaceBy int `yaml:"bloom_split_series_keyspace_by" json:"bloom_split_series_keyspace_by" category:"experimental"`
- BloomBlockEncoding string `yaml:"bloom_block_encoding" json:"bloom_block_encoding" category:"experimental"`
+ BloomCreationEnabled bool `yaml:"bloom_creation_enabled" json:"bloom_creation_enabled" category:"experimental"`
+ BloomPlanningStrategy string `yaml:"bloom_planning_strategy" json:"bloom_planning_strategy" category:"experimental"`
+ BloomSplitSeriesKeyspaceBy int `yaml:"bloom_split_series_keyspace_by" json:"bloom_split_series_keyspace_by" category:"experimental"`
+ BloomTaskTargetSeriesChunkSize flagext.ByteSize `yaml:"bloom_task_target_series_chunk_size" json:"bloom_task_target_series_chunk_size" category:"experimental"`
+ BloomBlockEncoding string `yaml:"bloom_block_encoding" json:"bloom_block_encoding" category:"experimental"`
BloomMaxBlockSize flagext.ByteSize `yaml:"bloom_max_block_size" json:"bloom_max_block_size" category:"experimental"`
BloomMaxBloomSize flagext.ByteSize `yaml:"bloom_max_bloom_size" json:"bloom_max_bloom_size" category:"experimental"`
@@ -224,6 +227,16 @@ type Limits struct {
BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"`
IngestionPartitionsTenantShardSize int `yaml:"ingestion_partitions_tenant_shard_size" json:"ingestion_partitions_tenant_shard_size" category:"experimental"`
+
+ PatternIngesterTokenizableJSONFieldsDefault dskit_flagext.StringSliceCSV `yaml:"pattern_ingester_tokenizable_json_fields_default" json:"pattern_ingester_tokenizable_json_fields_default" doc:"hidden"`
+ PatternIngesterTokenizableJSONFieldsAppend dskit_flagext.StringSliceCSV `yaml:"pattern_ingester_tokenizable_json_fields_append" json:"pattern_ingester_tokenizable_json_fields_append" doc:"hidden"`
+ PatternIngesterTokenizableJSONFieldsDelete dskit_flagext.StringSliceCSV `yaml:"pattern_ingester_tokenizable_json_fields_delete" json:"pattern_ingester_tokenizable_json_fields_delete" doc:"hidden"`
+
+ // This config doesn't have a CLI flag registered here because they're registered in
+ // their own original config struct.
+ S3SSEType string `yaml:"s3_sse_type" json:"s3_sse_type" doc:"nocli|description=S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used."`
+ S3SSEKMSKeyID string `yaml:"s3_sse_kms_key_id" json:"s3_sse_kms_key_id" doc:"nocli|description=S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set."`
+ S3SSEKMSEncryptionContext string `yaml:"s3_sse_kms_encryption_context" json:"s3_sse_kms_encryption_context" doc:"nocli|description=S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set."`
}
type StreamRetention struct {
@@ -243,7 +256,7 @@ func (e LimitError) Error() string {
// RegisterFlags adds the flags required to config this to the given FlagSet
func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "global", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global). The ingestion rate strategy cannot be overridden on a per-tenant basis.\n- local: enforces the limit on a per distributor basis. The actual effective rate limit will be N times higher, where N is the number of distributor replicas.\n- global: enforces the limit globally, configuring a per-distributor local rate limiter as 'ingestion_rate / N', where N is the number of distributor replicas (it's automatically adjusted if the number of replicas change). The global strategy requires the distributors to form their own ring, which is used to keep track of the current number of healthy distributor replicas.")
- f.Float64Var(&l.IngestionRateMB, "distributor.ingestion-rate-limit-mb", 4, "Per-user ingestion rate limit in sample size per second. Units in MB.")
+ f.Float64Var(&l.IngestionRateMB, "distributor.ingestion-rate-limit-mb", 4, "Per-user ingestion rate limit in sample size per second. Sample size includes size of the logs line and the size of structured metadata labels. Units in MB.")
f.Float64Var(&l.IngestionBurstSizeMB, "distributor.ingestion-burst-size-mb", 6, "Per-user allowed ingestion burst size (in sample size). Units in MB. The burst size refers to the per-distributor local rate limiter even in the case of the 'global' strategy, and should be set at least to the maximum logs size expected in a single push request.")
_ = l.MaxLineSize.Set("256KB")
@@ -389,7 +402,10 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
)
f.BoolVar(&l.BloomCreationEnabled, "bloom-build.enable", false, "Experimental. Whether to create blooms for the tenant.")
- f.IntVar(&l.BloomSplitSeriesKeyspaceBy, "bloom-build.split-keyspace-by", 256, "Experimental. Number of splits to create for the series keyspace when building blooms. The series keyspace is split into this many parts to parallelize bloom creation.")
+ f.StringVar(&l.BloomPlanningStrategy, "bloom-build.planning-strategy", "split_keyspace_by_factor", "Experimental. Bloom planning strategy to use in bloom creation. Can be one of: 'split_keyspace_by_factor', 'split_by_series_chunks_size'")
+ f.IntVar(&l.BloomSplitSeriesKeyspaceBy, "bloom-build.split-keyspace-by", 256, "Experimental. Only if `bloom-build.planning-strategy` is 'split'. Number of splits to create for the series keyspace when building blooms. The series keyspace is split into this many parts to parallelize bloom creation.")
+ _ = l.BloomTaskTargetSeriesChunkSize.Set(defaultBloomTaskTargetChunkSize)
+ f.Var(&l.BloomTaskTargetSeriesChunkSize, "bloom-build.split-target-series-chunk-size", fmt.Sprintf("Experimental. Target chunk size in bytes for bloom tasks. Default is %s.", defaultBloomTaskTargetChunkSize))
f.IntVar(&l.BloomBuildMaxBuilders, "bloom-build.max-builders", 0, "Experimental. Maximum number of builders to use when building blooms. 0 allows unlimited builders.")
f.DurationVar(&l.BloomBuilderResponseTimeout, "bloom-build.builder-response-timeout", 0, "Experimental. Timeout for a builder to finish a task. If a builder does not respond within this time, it is considered failed and the task will be requeued. 0 disables the timeout.")
f.IntVar(&l.BloomBuildTaskMaxRetries, "bloom-build.task-max-retries", 3, "Experimental. Maximum number of retries for a failed task. If a task fails more than this number of times, it is considered failed and will not be retried. A value of 0 disables this limit.")
@@ -416,6 +432,11 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&l.BlockIngestionStatusCode, "limits.block-ingestion-status-code", defaultBlockedIngestionStatusCode, "HTTP status code to return when ingestion is blocked. If 200, the ingestion will be blocked without returning an error to the client. By Default, a custom status code (260) is returned to the client along with an error message.")
f.IntVar(&l.IngestionPartitionsTenantShardSize, "limits.ingestion-partition-tenant-shard-size", 0, "The number of partitions a tenant's data should be sharded to when using kafka ingestion. Tenants are sharded across partitions using shuffle-sharding. 0 disables shuffle sharding and tenant is sharded across all partitions.")
+
+ _ = l.PatternIngesterTokenizableJSONFieldsDefault.Set("log,message,msg,msg_,_msg,content")
+ f.Var(&l.PatternIngesterTokenizableJSONFieldsDefault, "limits.pattern-ingester-tokenizable-json-fields", "List of JSON fields that should be tokenized in the pattern ingester.")
+ f.Var(&l.PatternIngesterTokenizableJSONFieldsAppend, "limits.pattern-ingester-tokenizable-json-fields-append", "List of JSON fields that should be appended to the default list of tokenizable fields in the pattern ingester.")
+ f.Var(&l.PatternIngesterTokenizableJSONFieldsDelete, "limits.pattern-ingester-tokenizable-json-fields-delete", "List of JSON fields that should be deleted from the (default U append) list of tokenizable fields in the pattern ingester.")
}
// SetGlobalOTLPConfig set GlobalOTLPConfig which is used while unmarshaling per-tenant otlp config to use the default list of resource attributes picked as index labels.
@@ -996,10 +1017,18 @@ func (o *Overrides) BloomCreationEnabled(userID string) bool {
return o.getOverridesForUser(userID).BloomCreationEnabled
}
+func (o *Overrides) BloomPlanningStrategy(userID string) string {
+ return o.getOverridesForUser(userID).BloomPlanningStrategy
+}
+
func (o *Overrides) BloomSplitSeriesKeyspaceBy(userID string) int {
return o.getOverridesForUser(userID).BloomSplitSeriesKeyspaceBy
}
+func (o *Overrides) BloomTaskTargetSeriesChunksSizeBytes(userID string) uint64 {
+ return uint64(o.getOverridesForUser(userID).BloomTaskTargetSeriesChunkSize)
+}
+
func (o *Overrides) BloomBuildMaxBuilders(userID string) int {
return o.getOverridesForUser(userID).BloomBuildMaxBuilders
}
@@ -1048,6 +1077,56 @@ func (o *Overrides) BlockIngestionStatusCode(userID string) int {
return o.getOverridesForUser(userID).BlockIngestionStatusCode
}
+func (o *Overrides) PatternIngesterTokenizableJSONFields(userID string) []string {
+ defaultFields := o.getOverridesForUser(userID).PatternIngesterTokenizableJSONFieldsDefault
+ appendFields := o.getOverridesForUser(userID).PatternIngesterTokenizableJSONFieldsAppend
+ deleteFields := o.getOverridesForUser(userID).PatternIngesterTokenizableJSONFieldsDelete
+
+ outputMap := make(map[string]struct{}, len(defaultFields)+len(appendFields))
+
+ for _, field := range defaultFields {
+ outputMap[field] = struct{}{}
+ }
+
+ for _, field := range appendFields {
+ outputMap[field] = struct{}{}
+ }
+
+ for _, field := range deleteFields {
+ delete(outputMap, field)
+ }
+
+ output := make([]string, 0, len(outputMap))
+ for field := range outputMap {
+ output = append(output, field)
+ }
+
+ return output
+}
+
+func (o *Overrides) PatternIngesterTokenizableJSONFieldsAppend(userID string) []string {
+ return o.getOverridesForUser(userID).PatternIngesterTokenizableJSONFieldsAppend
+}
+
+func (o *Overrides) PatternIngesterTokenizableJSONFieldsDelete(userID string) []string {
+ return o.getOverridesForUser(userID).PatternIngesterTokenizableJSONFieldsDelete
+}
+
+// S3SSEType returns the per-tenant S3 SSE type.
+func (o *Overrides) S3SSEType(user string) string {
+ return o.getOverridesForUser(user).S3SSEType
+}
+
+// S3SSEKMSKeyID returns the per-tenant S3 KMS-SSE key id.
+func (o *Overrides) S3SSEKMSKeyID(user string) string {
+ return o.getOverridesForUser(user).S3SSEKMSKeyID
+}
+
+// S3SSEKMSEncryptionContext returns the per-tenant S3 KMS-SSE encryption context.
+func (o *Overrides) S3SSEKMSEncryptionContext(user string) string {
+ return o.getOverridesForUser(user).S3SSEKMSEncryptionContext
+}
+
func (o *Overrides) getOverridesForUser(userID string) *Limits {
if o.tenantLimits != nil {
l := o.tenantLimits.TenantLimits(userID)
diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go
index 19278c77a342f..bfb522f73a2e6 100644
--- a/pkg/validation/limits_test.go
+++ b/pkg/validation/limits_test.go
@@ -354,3 +354,63 @@ func TestLimitsValidation(t *testing.T) {
})
}
}
+
+func Test_PatternIngesterTokenizableJSONFields(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ yaml string
+ expected []string
+ }{
+ {
+ name: "only defaults",
+ yaml: `
+pattern_ingester_tokenizable_json_fields_default: log,message
+`,
+ expected: []string{"log", "message"},
+ },
+ {
+ name: "with append",
+ yaml: `
+pattern_ingester_tokenizable_json_fields_default: log,message
+pattern_ingester_tokenizable_json_fields_append: msg,body
+`,
+ expected: []string{"log", "message", "msg", "body"},
+ },
+ {
+ name: "with delete",
+ yaml: `
+pattern_ingester_tokenizable_json_fields_default: log,message
+pattern_ingester_tokenizable_json_fields_delete: message
+`,
+ expected: []string{"log"},
+ },
+ {
+ name: "with append and delete from default",
+ yaml: `
+pattern_ingester_tokenizable_json_fields_default: log,message
+pattern_ingester_tokenizable_json_fields_append: msg,body
+pattern_ingester_tokenizable_json_fields_delete: message
+`,
+ expected: []string{"log", "msg", "body"},
+ },
+ {
+ name: "with append and delete from append",
+ yaml: `
+pattern_ingester_tokenizable_json_fields_default: log,message
+pattern_ingester_tokenizable_json_fields_append: msg,body
+pattern_ingester_tokenizable_json_fields_delete: body
+`,
+ expected: []string{"log", "message", "msg"},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ overrides := Overrides{
+ defaultLimits: &Limits{},
+ }
+ require.NoError(t, yaml.Unmarshal([]byte(tc.yaml), overrides.defaultLimits))
+
+ actual := overrides.PatternIngesterTokenizableJSONFields("fake")
+ require.ElementsMatch(t, tc.expected, actual)
+ })
+ }
+}
diff --git a/production/README.md b/production/README.md
index fba63e7a868ea..4e826dde638ab 100644
--- a/production/README.md
+++ b/production/README.md
@@ -83,7 +83,7 @@ First, see the [build from source](../README.md) section of the root readme.
Once Promtail is built, to run Promtail, use the following command:
```bash
-$ ./promtail -config.file=./cmd/promtail/promtail-local-config.yaml
+$ ./promtail -config.file=./clients/cmd/promtail/promtail-local-config.yaml
...
```
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 55ed8d2ed6849..73af7d78fde02 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 6.18.0
+
+- [CHANGE] Added automated weekly releases, which created this release.
+
## 6.17.1
- [BUGFIX] Added missing `loki.storage.azure.chunkDelimiter` parameter to Helm chart.
@@ -28,6 +32,7 @@ Entries should include a reference to the pull request that introduced the chang
## 6.15.0
- [ENHANCEMENT] Allow setting annotations for memberlist and query-scheduler-discovery services
+- [ENHANCEMENT] Allow to customize `client_max_body_size` when using Loki Gateway. #12924
## 6.14.1
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 2381bac048101..273bdbbb7db13 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
type: application
appVersion: 3.2.0
-version: 6.17.1
+version: 6.18.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index e152718c170ff..f78406625ac47 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
@@ -24,6 +24,39 @@ Find more information in the Loki Helm Chart [documentation](https://grafana.com
If you made any changes to the [Chart.yaml](https://github.com/grafana/loki/blob/main/production/helm/loki/Chart.yaml) or [values.yaml](https://github.com/grafana/loki/blob/main/production/helm/loki/values.yaml) run `make helm-docs` from the root of the repository to update the documentation and commit the changed files.
+Furthermore, please add an entry to the [CHANGELOG.md](./CHANGELOG.md) file about what you changed. This file has a header that looks like this:
+
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+```
+
+Place your changes as a bulleted list below this header. The helm chart is automatically released once a week, at which point the `CHANGELOG.md` file will be updated to reflect the release of all changes between this header and the header of the previous version as the changes for that week's release. For example, if the weekly release will be `1.21.0`, and the `CHANGELOG.md` file has the following entries:
+
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+
+- [CHANGE] Changed the thing
+- [FEATURE] Cool new feature
+
+## 1.20.0
+
+- [BUGFIX] Fixed the bug
+```
+
+Then the weekly release will create a `CHANGELOG.md` with the following content:
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+
+## 1.21.0
+
+- [CHANGE] Changed the thing
+- [FEATURE] Cool new feature
+
+## 1.20.0
+
+- [BUGFIX] Fixed the bug
+```
+
#### Versioning
Normally contributors need _not_ bump the version nor update the [CHANGELOG.md](https://github.com/grafana/loki/blob/main/production/helm/loki/CHANGELOG.md). A new version of the Chart will follow this cadence:
diff --git a/production/helm/loki/README.md.gotmpl b/production/helm/loki/README.md.gotmpl
index 72d55f3b44c9e..9934d21fcb098 100644
--- a/production/helm/loki/README.md.gotmpl
+++ b/production/helm/loki/README.md.gotmpl
@@ -14,6 +14,39 @@ Find more information in the Loki Helm Chart [documentation](https://grafana.com
If you made any changes to the [Chart.yaml](https://github.com/grafana/loki/blob/main/production/helm/loki/Chart.yaml) or [values.yaml](https://github.com/grafana/loki/blob/main/production/helm/loki/values.yaml) run `make helm-docs` from the root of the repository to update the documentation and commit the changed files.
+Furthermore, please add an entry to the [CHANGELOG.md](./CHANGELOG.md) file about what you changed. This file has a header that looks like this:
+
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+```
+
+Place your changes as a bulleted list below this header. The helm chart is automatically released once a week, at which point the `CHANGELOG.md` file will be updated to reflect the release of all changes between this header and the header of the previous version as the changes for that week's release. For example, if the weekly release will be `1.21.0`, and the `CHANGELOG.md` file has the following entries:
+
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+
+- [CHANGE] Changed the thing
+- [FEATURE] Cool new feature
+
+## 1.20.0
+
+- [BUGFIX] Fixed the bug
+```
+
+Then the weekly release will create a `CHANGELOG.md` with the following content:
+```
+[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+
+## 1.21.0
+
+- [CHANGE] Changed the thing
+- [FEATURE] Cool new feature
+
+## 1.20.0
+
+- [BUGFIX] Fixed the bug
+```
+
#### Versioning
Normally contributors need _not_ bump the version nor update the [CHANGELOG.md](https://github.com/grafana/loki/blob/main/production/helm/loki/CHANGELOG.md). A new version of the Chart will follow this cadence:
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index f302bc5a621a3..6a2aa3f218f36 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -540,11 +540,16 @@ Memcached Exporter Docker image
{{- include "loki.image" $dict -}}
{{- end }}
+{{/* Allow KubeVersion to be overridden. */}}
+{{- define "loki.kubeVersion" -}}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}}
+{{- end -}}
+
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "loki.ingress.apiVersion" -}}
- {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
+ {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" (include "loki.kubeVersion" .)) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
@@ -564,14 +569,14 @@ Return if ingress is stable.
Return if ingress supports ingressClassName.
*/}}
{{- define "loki.ingress.supportsIngressClassName" -}}
- {{- or (eq (include "loki.ingress.isStable" .) "true") (and (eq (include "loki.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
+ {{- or (eq (include "loki.ingress.isStable" .) "true") (and (eq (include "loki.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" (include "loki.kubeVersion" .))) -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "loki.ingress.supportsPathType" -}}
- {{- or (eq (include "loki.ingress.isStable" .) "true") (and (eq (include "loki.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
+ {{- or (eq (include "loki.ingress.isStable" .) "true") (and (eq (include "loki.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" (include "loki.kubeVersion" .))) -}}
{{- end -}}
{{/*
@@ -736,7 +741,7 @@ http {
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
- client_max_body_size 4M;
+ client_max_body_size {{ .Values.gateway.nginxConfig.clientMaxBodySize }};
proxy_read_timeout 600; ## 10 minutes
proxy_send_timeout 600;
@@ -986,7 +991,7 @@ http {
{{/* Configure enableServiceLinks in pod */}}
{{- define "loki.enableServiceLinks" -}}
-{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.Version -}}
+{{- if semverCompare ">=1.13-0" (include "loki.kubeVersion" .) -}}
{{- if or (.Values.loki.enableServiceLinks) (ne .Values.loki.enableServiceLinks false) -}}
enableServiceLinks: true
{{- else -}}
@@ -1086,7 +1091,7 @@ checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256
Return the appropriate apiVersion for PodDisruptionBudget.
*/}}
{{- define "loki.pdb.apiVersion" -}}
- {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version) -}}
+ {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" (include "loki.kubeVersion" .)) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
@@ -1108,7 +1113,7 @@ Return the object store type for use with the test schema.
Return the appropriate apiVersion for HorizontalPodAutoscaler.
*/}}
{{- define "loki.hpa.apiVersion" -}}
- {{- if and (.Capabilities.APIVersions.Has "autoscaling/v2") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
+ {{- if and (.Capabilities.APIVersions.Has "autoscaling/v2") (semverCompare ">= 1.19-0" (include "loki.kubeVersion" .)) -}}
{{- print "autoscaling/v2" -}}
{{- else if .Capabilities.APIVersions.Has "autoscaling/v2beta2" -}}
{{- print "autoscaling/v2beta2" -}}
diff --git a/production/helm/loki/templates/admin-api/deployment-admin-api.yaml b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
index 650c72fc15983..f85bbf90014b4 100644
--- a/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
+++ b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
@@ -121,6 +121,10 @@ spec:
{{- if .Values.adminApi.env }}
{{ toYaml .Values.adminApi.env | nindent 12 }}
{{- end }}
+ {{- with .Values.adminApi.extraEnvFrom }}
+ envFrom:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
{{- with .Values.adminApi.extraContainers }}
{{ toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml
index 534190d4a4533..c20ce9f9a0552 100644
--- a/production/helm/loki/templates/backend/statefulset-backend.yaml
+++ b/production/helm/loki/templates/backend/statefulset-backend.yaml
@@ -32,7 +32,7 @@ spec:
partition: 0
serviceName: {{ include "loki.backendFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.backend.persistence.enableStatefulSetAutoDeletePVC) (.Values.backend.persistence.volumeClaimsEnabled) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.backend.persistence.enableStatefulSetAutoDeletePVC) (.Values.backend.persistence.volumeClaimsEnabled) }}
{{/*
Data on the backend nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml b/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml
index 7e97b8e93ece8..747642b227909 100644
--- a/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml
+++ b/production/helm/loki/templates/bloom-gateway/statefulset-bloom-gateway.yaml
@@ -19,7 +19,7 @@ spec:
partition: 0
serviceName: {{ include "loki.bloomGatewayFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.bloomGateway.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.bloomGateway.persistence.enableStatefulSetAutoDeletePVC) }}
persistentVolumeClaimRetentionPolicy:
whenDeleted: {{ .Values.bloomGateway.persistence.whenDeleted }}
whenScaled: {{ .Values.bloomGateway.persistence.whenScaled }}
diff --git a/production/helm/loki/templates/bloom-planner/statefulset-bloom-planner.yaml b/production/helm/loki/templates/bloom-planner/statefulset-bloom-planner.yaml
index 8406542dabaa4..d134af39e92c7 100644
--- a/production/helm/loki/templates/bloom-planner/statefulset-bloom-planner.yaml
+++ b/production/helm/loki/templates/bloom-planner/statefulset-bloom-planner.yaml
@@ -19,7 +19,7 @@ spec:
partition: 0
serviceName: {{ include "loki.bloomPlannerFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.bloomPlanner.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.bloomPlanner.persistence.enableStatefulSetAutoDeletePVC) }}
persistentVolumeClaimRetentionPolicy:
whenDeleted: {{ .Values.bloomPlanner.persistence.whenDeleted }}
whenScaled: {{ .Values.bloomPlanner.persistence.whenScaled }}
diff --git a/production/helm/loki/templates/compactor/statefulset-compactor.yaml b/production/helm/loki/templates/compactor/statefulset-compactor.yaml
index 98fab0affc32f..944ac425bf5ad 100644
--- a/production/helm/loki/templates/compactor/statefulset-compactor.yaml
+++ b/production/helm/loki/templates/compactor/statefulset-compactor.yaml
@@ -20,7 +20,7 @@ spec:
partition: 0
serviceName: {{ include "loki.compactorFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.compactor.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.compactor.persistence.enableStatefulSetAutoDeletePVC) }}
persistentVolumeClaimRetentionPolicy:
whenDeleted: {{ .Values.compactor.persistence.whenDeleted }}
whenScaled: {{ .Values.compactor.persistence.whenScaled }}
diff --git a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
index 746fa6142b771..d75fd5fe65492 100644
--- a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
+++ b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
@@ -114,6 +114,10 @@ spec:
{{- if .Values.enterpriseGateway.env }}
{{ toYaml .Values.enterpriseGateway.env | nindent 12 }}
{{- end }}
+ {{- with .Values.enterpriseGateway.extraEnvFrom }}
+ envFrom:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
{{- with .Values.enterpriseGateway.extraContainers }}
{{ toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml b/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml
index 0eb7cf3a911e4..d417c978140f7 100644
--- a/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml
+++ b/production/helm/loki/templates/index-gateway/statefulset-index-gateway.yaml
@@ -18,7 +18,7 @@ spec:
{{- end }}
serviceName: {{ include "loki.indexGatewayFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.indexGateway.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.indexGateway.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml
index 11d360f24f270..e4c35c7dd84c9 100644
--- a/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml
+++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-a.yaml
@@ -26,7 +26,7 @@ spec:
podManagementPolicy: Parallel
serviceName: {{ include "loki.ingesterFullname" . }}-zone-a
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml
index 8a46273fea049..db499cae8d583 100644
--- a/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml
+++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-b.yaml
@@ -26,7 +26,7 @@ spec:
podManagementPolicy: Parallel
serviceName: {{ include "loki.ingesterFullname" . }}-zone-b
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml b/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml
index 55c25396c956e..994b460b9a15f 100644
--- a/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml
+++ b/production/helm/loki/templates/ingester/statefulset-ingester-zone-c.yaml
@@ -26,7 +26,7 @@ spec:
podManagementPolicy: Parallel
serviceName: {{ include "loki.ingesterFullname" . }}-zone-c
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/ingester/statefulset-ingester.yaml b/production/helm/loki/templates/ingester/statefulset-ingester.yaml
index adeeb3b5e6e9e..f66145c1720fd 100644
--- a/production/helm/loki/templates/ingester/statefulset-ingester.yaml
+++ b/production/helm/loki/templates/ingester/statefulset-ingester.yaml
@@ -23,7 +23,7 @@ spec:
{{- end }}
serviceName: {{ include "loki.ingesterFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.ingester.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/networkpolicy.yaml b/production/helm/loki/templates/networkpolicy.yaml
index 5052e81162b3d..9286edb74eff9 100644
--- a/production/helm/loki/templates/networkpolicy.yaml
+++ b/production/helm/loki/templates/networkpolicy.yaml
@@ -66,7 +66,7 @@ spec:
{{- include "loki.selectorLabels" . | nindent 6 }}
ingress:
- ports:
- - port: http
+ - port: http-metrics
protocol: TCP
{{- if .Values.networkPolicy.ingress.namespaceSelector }}
from:
diff --git a/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml b/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml
index b1be84fef4de0..9538edcb57150 100644
--- a/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml
+++ b/production/helm/loki/templates/pattern-ingester/statefulset-pattern-ingester.yaml
@@ -20,7 +20,7 @@ spec:
partition: 0
serviceName: {{ include "loki.patternIngesterFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.patternIngester.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.patternIngester.persistence.enableStatefulSetAutoDeletePVC) }}
persistentVolumeClaimRetentionPolicy:
whenDeleted: {{ .Values.patternIngester.persistence.whenDeleted }}
whenScaled: {{ .Values.patternIngester.persistence.whenScaled }}
diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml
index 7696d90e65bd6..9d4213b174588 100644
--- a/production/helm/loki/templates/read/statefulset-read.yaml
+++ b/production/helm/loki/templates/read/statefulset-read.yaml
@@ -32,7 +32,7 @@ spec:
partition: 0
serviceName: {{ printf "%s-headless" (include "loki.readFullname" .) }}
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.read.persistence.enableStatefulSetAutoDeletePVC) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.read.persistence.enableStatefulSetAutoDeletePVC) }}
{{/*
Data on the read nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml
index 5e28902e5677f..4acd406b9cfa5 100644
--- a/production/helm/loki/templates/single-binary/statefulset.yaml
+++ b/production/helm/loki/templates/single-binary/statefulset.yaml
@@ -26,7 +26,7 @@ spec:
partition: 0
serviceName: {{ include "loki.singleBinaryFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.singleBinary.persistence.enableStatefulSetAutoDeletePVC) (.Values.singleBinary.persistence.enabled) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.singleBinary.persistence.enableStatefulSetAutoDeletePVC) (.Values.singleBinary.persistence.enabled) }}
{{/*
Data on the singleBinary nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml
index 75605c27c26cb..4d6183b291c13 100644
--- a/production/helm/loki/templates/write/statefulset-write.yaml
+++ b/production/helm/loki/templates/write/statefulset-write.yaml
@@ -32,7 +32,7 @@ spec:
partition: 0
serviceName: {{ include "loki.writeFullname" . }}-headless
revisionHistoryLimit: {{ .Values.loki.revisionHistoryLimit }}
- {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.write.persistence.enableStatefulSetAutoDeletePVC) (.Values.write.persistence.volumeClaimsEnabled) }}
+ {{- if and (semverCompare ">= 1.23-0" (include "loki.kubeVersion" .)) (.Values.write.persistence.enableStatefulSetAutoDeletePVC) (.Values.write.persistence.volumeClaimsEnabled) }}
{{/*
Data on the write nodes is easy to replace, so we want to always delete PVCs to make
operation easier, and will rely on re-fetching data when needed.
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 7870f53393c87..8c3c5c3bb6e3a 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -1,3 +1,9 @@
+# -- Overrides the version used to determine compatibility of resources with the target Kubernetes cluster.
+# This is useful when using `helm template`, because then helm will use the client version of kubectl as the Kubernetes version,
+# which may or may not match your cluster's server version. Example: 'v1.24.4'. Set to null to use the version that helm
+# devises.
+kubeVersionOverride: null
+
global:
image:
# -- Overrides the Docker registry globally for all images
@@ -821,6 +827,8 @@ adminApi:
# - domain.tld
# -- Additional CLI arguments for the `admin-api` target
extraArgs: {}
+ # -- Environment variables from secrets or configmaps to add to the admin-api pods
+ extraEnvFrom: []
# -- Additional labels for the `admin-api` Deployment
labels: {}
# -- Additional annotations for the `admin-api` Deployment
@@ -1074,6 +1082,8 @@ gateway:
# -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating
httpSnippet: >-
{{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}
+ # -- Allows customizing the `client_max_body_size` directive
+ clientMaxBodySize: 4M
# -- Whether ssl should be appended to the listen directive of the server block or not.
ssl: false
# -- Override Read URL
@@ -1099,6 +1109,8 @@ enterpriseGateway:
# - domain.tld
# -- Additional CLI arguments for the `gateway` target
extraArgs: {}
+ # -- Environment variables from secrets or configmaps to add to the enterprise gateway pods
+ extraEnvFrom: []
# -- Additional labels for the `gateway` Pod
labels: {}
# -- Additional annotations for the `gateway` Pod
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-compactor.json b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
similarity index 70%
rename from production/loki-mixin-compiled-ssd/dashboards/loki-bloom-compactor.json
rename to production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
index c365fab0a7e59..149dfacd857d3 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-compactor.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
@@ -36,29 +36,6 @@
"title": "Overview",
"type": "row"
},
- {
- "gridPos": {
- "h": 8,
- "w": 14,
- "x": 0,
- "y": 1
- },
- "id": 35,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "## About the Bloom Compactor\nThe compactor iterates through chunks and creates blooms out of them.\nThe size of the resulting blooms depends on the bloom filter settings, the tokenizer settings, the number of ring tokens per compactor and the total number opf compactors.\n\nCompactors are horizontally scalable and uses a ring to:\n- Shard tenants\n- Shard series fingerprints within a tenant subring.\n\nThe blooms for the series are grouped together in blocks which are flushed to object store.",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -77,6 +54,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 15,
"gradientMode": "none",
@@ -121,13 +99,30 @@
},
"unit": "percentunit"
},
- "overrides": [ ]
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "none"
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 9
+ "y": 1
},
"id": 42,
"options": {
@@ -144,7 +139,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -152,41 +147,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"})\n/\nsum(count(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}))",
- "hide": false,
- "instant": false,
- "legendFormat": "avg",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.9, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.1, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum(loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "p10",
+ "legendFormat": "Progress",
"range": true,
- "refId": "C"
+ "refId": "D"
}
],
- "title": "Progress",
+ "title": "Overall progress",
"type": "timeseries"
},
{
@@ -194,7 +163,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Uncompressed size of chunks in a series VS the size of the blooms built.",
+ "description": "Cell-wide compaction progress. Should increase till completion throughout each compaction period.",
"fieldConfig": {
"defaults": {
"color": {
@@ -207,8 +176,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -242,33 +212,29 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
},
- "unit": "bytes"
+ "unit": "percentunit"
},
"overrides": [
{
"matcher": {
- "id": "byName",
- "options": "Ratio"
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
},
"properties": [
{
"id": "unit",
- "value": "percentunit"
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "Ratio over range"
- },
- "properties": [
+ "value": "none"
+ },
{
- "id": "unit",
- "value": "percentunit"
+ "id": "custom.fillOpacity",
+ "value": 0
}
]
}
@@ -278,9 +244,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 9
+ "y": 1
},
- "id": 41,
+ "id": 116,
"options": {
"legend": {
"calcs": [ ],
@@ -295,7 +261,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -303,54 +269,158 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
+ "expr": "sum by (tenant) (loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum by (tenant) (loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "Bloom",
+ "legendFormat": "{{tenant}}",
"range": true,
- "refId": "A"
- },
+ "refId": "D"
+ }
+ ],
+ "title": "Progress by tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "description": "Blooms size vs uncompressed chunk size.",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 17,
+ "x": 0,
+ "y": 8
+ },
+ "id": 51,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Chunk",
- "range": true,
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\"",
+ "queryType": "range",
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ }
+ ],
+ "title": "Errors Planner",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
},
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio",
- "range": true,
- "refId": "C"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 3,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 7,
+ "x": 17,
+ "y": 8
+ },
+ "id": 53,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio over range",
- "range": true,
- "refId": "D"
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\" [$__auto]))",
+ "legendFormat": "Error rate",
+ "queryType": "range",
+ "refId": "A"
}
],
- "title": "Chunks and Bloom size",
+ "title": "Errors Rate Planner",
"type": "timeseries"
},
{
@@ -359,13 +429,17 @@
"uid": "${loki_datasource}"
},
"description": "Blooms size vs uncompressed chunk size.",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 17,
"x": 0,
- "y": 16
+ "y": 15
},
- "id": 51,
+ "id": 133,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
@@ -377,7 +451,7 @@
"wrapLogMessage": false
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -385,12 +459,12 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\"",
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\"",
"queryType": "range",
"refId": "B"
}
],
- "title": "Errors",
+ "title": "Errors Builder",
"type": "logs"
},
{
@@ -411,6 +485,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
@@ -457,9 +532,9 @@
"h": 7,
"w": 7,
"x": 17,
- "y": 16
+ "y": 15
},
- "id": 53,
+ "id": 134,
"options": {
"legend": {
"calcs": [ ],
@@ -474,7 +549,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -482,12 +557,13 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\" [$__auto]))",
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\" [$__auto]))",
+ "legendFormat": "Error rate",
"queryType": "range",
"refId": "A"
}
],
- "title": "Errors Rate",
+ "title": "Errors Rate Builder",
"type": "timeseries"
},
{
@@ -496,7 +572,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 23
+ "y": 22
},
"id": 112,
"panels": [
@@ -517,9 +593,11 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -551,7 +629,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -559,47 +638,1093 @@
}
]
},
- "unit": "percentunit"
- },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 24
- },
- "id": 114,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "unit": "none"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 64
+ },
+ "id": 125,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "loki_bloomplanner_inflight_tasks{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", quantile=\"0.95\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "inflight p95",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Connected builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "IDLE Builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Builders processing task"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 64
+ },
+ "id": 126,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_connected_builders{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Connected builders",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Builders processing task",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} == 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "IDLE Builders",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks per builder",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 72
+ },
+ "id": 81,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.75, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p75",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Series per task (includes series copied from other blocks)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 72
+ },
+ "id": 91,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Number of bytes from chunks added to blocks during each compaction.",
+ "type": "timeseries"
+ },
+ {
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 24,
+ "x": 0,
+ "y": 79
+ },
+ "id": 117,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "Identify the tenant using the **_Progress by tenant_** panel from the overview and set tenant variable",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "Tip",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 81
+ },
+ "id": 114,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", user=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Tasks per tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 81
+ },
+ "id": 115,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"}\n|= \"level=error\"\n|= \"tenant=$tenant\"",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Tenant errors",
+ "type": "logs"
+ }
+ ],
+ "targets": [ ],
+ "title": "Tasks",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 95,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "How many tokens each builder is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "log"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 90
+ },
+ "id": 96,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Per core",
+ "range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Total",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Progress per pod",
+ "title": "Tokens rate",
"type": "timeseries"
},
{
@@ -607,23 +1732,56 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
"fieldConfig": {
"defaults": {
- "fieldMinMax": false,
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
"mappings": [ ],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "#EAB839",
- "value": 0
+ "color": "green",
+ "value": null
},
{
- "color": "green",
- "value": 100
+ "color": "red",
+ "value": 80
}
]
},
@@ -635,26 +1793,24 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 24
+ "y": 90
},
- "id": 115,
+ "id": 97,
"options": {
- "minVizHeight": 75,
- "minVizWidth": 75,
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "showThresholdLabels": false,
- "showThresholdMarkers": false,
- "sizing": "auto"
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
},
"panels": [ ],
- "pluginVersion": "11.0.0-68102",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -662,7 +1818,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
+ "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -670,55 +1826,15 @@
"refId": "B"
}
],
- "title": "Current Progress per pod",
- "type": "gauge"
- }
- ],
- "targets": [ ],
- "title": "Progress per pod",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 24
- },
- "id": 56,
- "panels": [
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 85,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
+ "title": "tokens/s by collision type",
+ "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Shows the expected number of cpu cores we need to provision to build blooms as fast as we ingest data so a compaction iteration doesn't take longer than the compaction interval.\n\nWe may decide to have more to speed up compaction.",
+ "description": "The sizes of the blooms created by the compactor. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
"fieldConfig": {
"defaults": {
"color": {
@@ -731,6 +1847,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -744,7 +1861,8 @@
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "type": "linear"
+ "log": 2,
+ "type": "log"
},
"showPoints": "auto",
"spanNulls": false,
@@ -763,19 +1881,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 26
+ "y": 98
},
- "id": 94,
+ "id": 98,
"options": {
"legend": {
"calcs": [ ],
@@ -790,7 +1913,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -798,12 +1921,12 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n)",
+ "expr": "histogram_quantile(\n 1.0,\n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Needed",
+ "legendFormat": "max",
"range": true,
- "refId": "B"
+ "refId": "D"
},
{
"datasource": {
@@ -811,15 +1934,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Available",
+ "legendFormat": "p99",
"range": true,
- "refId": "A"
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Required CPUs to not lag behind",
+ "title": "Bloom size",
"type": "timeseries"
},
{
@@ -827,7 +1963,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "How many chunks are we indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -840,8 +1976,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -850,9 +1987,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -882,17 +2016,17 @@
}
]
},
- "unit": "Bps"
+ "unit": "short"
},
"overrides": [ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 26
+ "y": 98
},
- "id": 72,
+ "id": 99,
"options": {
"legend": {
"calcs": [ ],
@@ -907,7 +2041,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -915,35 +2049,38 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval])) by (pod)\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (pod)",
- "hide": true,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])) by (type)",
"hide": false,
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "MB/s per core",
+ "title": "Chunks indexed",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Bloom building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 56,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Shows the expected number of CPU cores we need to provision to build blooms as fast as we ingest data, so a build iteration doesn't take longer than the build interval.\n\nWe may decide to provision more cores to speed up building blooms.",
"fieldConfig": {
"defaults": {
"color": {
@@ -956,6 +2093,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -998,9 +2136,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 33
+ "y": 2030
},
- "id": 1,
+ "id": 94,
"options": {
"legend": {
"calcs": [ ],
@@ -1015,7 +2153,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1023,10 +2161,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n)",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Needed",
"range": true,
"refId": "B"
},
@@ -1036,22 +2174,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "instant": false,
- "legendFormat": "p99",
+ "legendFormat": "Available",
"range": true,
"refId": "A"
},
@@ -1061,28 +2187,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)\n*\ncount(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Avg",
+ "legendFormat": "Provisioned",
"range": true,
- "refId": "E"
+ "refId": "C"
}
],
- "title": "CPU",
+ "title": "Required CPUs to not lag behind",
"type": "timeseries"
},
{
@@ -1090,6 +2203,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1102,8 +2216,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1112,6 +2227,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1140,7 +2258,8 @@
"value": 80
}
]
- }
+ },
+ "unit": "Bps"
},
"overrides": [ ]
},
@@ -1148,9 +2267,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 33
+ "y": 2030
},
- "id": 75,
+ "id": 72,
"options": {
"legend": {
"calcs": [ ],
@@ -1165,7 +2284,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1173,40 +2292,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Total",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
- "hide": false,
- "instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
}
],
- "title": "CPU per pod",
+ "title": "MB/s per core",
"type": "timeseries"
},
{
@@ -1226,6 +2320,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1258,14 +2353,9 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "bytes"
+ }
},
"overrides": [ ]
},
@@ -1273,9 +2363,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 2037
},
- "id": 76,
+ "id": 1,
"options": {
"legend": {
"calcs": [ ],
@@ -1290,7 +2380,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1298,7 +2388,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1311,7 +2401,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1324,7 +2414,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"instant": false,
"legendFormat": "p99",
"range": true,
@@ -1336,7 +2426,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "p50",
@@ -1349,15 +2439,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "Avg",
"range": true,
"refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "max(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Max",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Memory (workingset)",
+ "title": "CPU",
"type": "timeseries"
},
{
@@ -1377,6 +2480,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1415,43 +2519,17 @@
"value": 80
}
]
- },
- "unit": "bytes"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "bloom-compactor-106"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
}
- ]
+ },
+ "overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 2037
},
- "id": 5,
+ "id": 75,
"options": {
"legend": {
"calcs": [ ],
@@ -1466,7 +2544,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1474,7 +2552,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"})",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "{{pod}}",
"range": true,
@@ -1486,7 +2564,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1499,7 +2577,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1507,7 +2585,7 @@
"refId": "C"
}
],
- "title": "Memory per pod (workingset)",
+ "title": "CPU per pod",
"type": "timeseries"
},
{
@@ -1515,7 +2593,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1528,8 +2605,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1538,9 +2616,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1570,7 +2645,7 @@
}
]
},
- "unit": "none"
+ "unit": "bytes"
},
"overrides": [ ]
},
@@ -1578,9 +2653,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 47
+ "y": 2044
},
- "id": 27,
+ "id": 76,
"options": {
"legend": {
"calcs": [ ],
@@ -1595,7 +2670,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1603,140 +2678,86 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n) > 0",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
+ "hide": false,
"instant": false,
- "legendFormat": "Restarts",
+ "legendFormat": "Request",
"range": true,
- "refId": "A"
- }
- ],
- "title": "Container restarts",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 15,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "unit": "none"
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
},
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 47
- },
- "id": 77,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69868",
- "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}\n )\n) > 0",
+ "expr": "max (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
"instant": false,
- "legendFormat": "{{reason}} / {{pod}}",
+ "legendFormat": "Max",
"range": true,
- "refId": "A"
+ "refId": "F"
}
],
- "title": "Container restarts reason per pod",
+ "title": "Memory (workingset)",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Resource Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 95,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many tokens each compactor is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -1749,6 +2770,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1762,8 +2784,7 @@
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "log"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -1788,17 +2809,18 @@
"value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 0,
- "y": 55
+ "x": 12,
+ "y": 2044
},
- "id": 96,
+ "id": 5,
"options": {
"legend": {
"calcs": [ ],
@@ -1813,7 +2835,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1821,10 +2843,22 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens checked per pod, millions/s\nsum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}))\n/ 1e6",
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"})",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Per core",
+ "legendFormat": "Request",
"range": true,
"refId": "B"
},
@@ -1834,15 +2868,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) / 1e6",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "Limit",
"range": true,
"refId": "C"
}
],
- "title": "Tokens rate (millions)",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
},
{
@@ -1850,7 +2884,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1863,8 +2897,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1873,6 +2908,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1902,17 +2940,17 @@
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 12,
- "y": 55
+ "x": 0,
+ "y": 2051
},
- "id": 97,
+ "id": 27,
"options": {
"legend": {
"calcs": [ ],
@@ -1927,7 +2965,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1935,15 +2973,14 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Restarts",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "tokens/s by collision type",
+ "title": "Container restarts",
"type": "timeseries"
},
{
@@ -1951,7 +2988,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "The sizes of the blooms created by the compactor. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1964,8 +3001,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1974,6 +3012,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2003,17 +3044,17 @@
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 0,
- "y": 63
+ "x": 12,
+ "y": 2051
},
- "id": 98,
+ "id": 77,
"options": {
"legend": {
"calcs": [ ],
@@ -2028,7 +3069,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2036,49 +3077,36 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(\n 0.90, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "p50",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
- "refId": "F"
+ "refId": "A"
}
],
- "title": "Bloom size",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Builder Resource Usage",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 118,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many chunks are we indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2091,6 +3119,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2123,24 +3152,19 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "none"
+ }
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 12,
- "y": 63
+ "x": 0,
+ "y": 2302
},
- "id": 99,
+ "id": 119,
"options": {
"legend": {
"calcs": [ ],
@@ -2155,7 +3179,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2163,32 +3187,68 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (type)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Request",
"range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Chunks indexed",
+ "title": "CPU",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Bloom building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 103,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
@@ -2206,6 +3266,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2238,6 +3299,10 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
}
@@ -2247,10 +3312,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 72
+ "x": 12,
+ "y": 2302
},
- "id": 107,
+ "id": 120,
"options": {
"legend": {
"calcs": [ ],
@@ -2258,29 +3323,55 @@
"placement": "bottom",
"showLegend": true
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Limit",
"range": true,
- "refId": "A"
+ "refId": "C"
}
],
- "title": "Created Blocks",
+ "title": "CPU per pod",
"type": "timeseries"
},
{
@@ -2288,7 +3379,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2301,6 +3391,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2333,19 +3424,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 72
+ "x": 0,
+ "y": 2309
},
- "id": 106,
+ "id": 121,
"options": {
"legend": {
"calcs": [ ],
@@ -2360,6 +3456,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2367,15 +3464,66 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Deleted Blocks",
+ "title": "Memory (workingset)",
"type": "timeseries"
},
{
@@ -2383,7 +3531,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -2396,6 +3543,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2428,19 +3576,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 79
+ "x": 12,
+ "y": 2309
},
- "id": 109,
+ "id": 122,
"options": {
"legend": {
"calcs": [ ],
@@ -2455,6 +3608,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2462,37 +3616,48 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_reused_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"})",
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "{{pod}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Blocks reused",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Blocks building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
- },
- "id": 110,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2505,8 +3670,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2515,6 +3681,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2537,9 +3706,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
@@ -2547,9 +3721,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 87
+ "y": 2316
},
- "id": 108,
+ "id": 123,
"options": {
"legend": {
"calcs": [ ],
@@ -2564,7 +3738,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2572,15 +3746,14 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "Metas",
+ "legendFormat": "Restarts",
"range": true,
"refId": "A"
}
],
- "title": "Created Metas",
+ "title": "Container restarts",
"type": "timeseries"
},
{
@@ -2588,7 +3761,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2601,8 +3774,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2611,6 +3785,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2633,9 +3810,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
@@ -2643,9 +3825,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 87
+ "y": 2316
},
- "id": 105,
+ "id": 124,
"options": {
"legend": {
"calcs": [ ],
@@ -2660,7 +3842,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2668,20 +3850,19 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "Metas",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
"refId": "A"
}
],
- "title": "Deleted Metas",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
}
],
"targets": [ ],
- "title": "Metas building",
+ "title": "Planner Resource Usage",
"type": "row"
},
{
@@ -2690,35 +3871,10 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 28
+ "y": 26
},
- "id": 80,
+ "id": 110,
"panels": [
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 95
- },
- "id": 93,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -2736,6 +3892,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2776,11 +3933,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
+ "w": 9,
"x": 0,
- "y": 96
+ "y": 2497
},
- "id": 83,
+ "id": 108,
"options": {
"legend": {
"calcs": [ ],
@@ -2795,6 +3952,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2802,48 +3960,92 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Metas",
"range": true,
"refId": "A"
+ }
+ ],
+ "title": "Created Metas",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 3,
+ "x": 9,
+ "y": 2497
+ },
+ "id": 140,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[30m]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "table",
"hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
+ "instant": true,
+ "legendFormat": "Metas",
+ "range": false,
+ "refId": "A"
}
],
- "title": "Tenants",
- "type": "timeseries"
+ "title": "Created Metas",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2856,6 +4058,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2898,9 +4101,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 96
+ "y": 2497
},
- "id": 84,
+ "id": 105,
"options": {
"legend": {
"calcs": [ ],
@@ -2915,6 +4118,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2922,42 +4126,32 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_metas_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "Tenants per pod",
+ "title": "Deleted Metas",
"type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 103
- },
- "id": 86,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Number of tenant tables processed. ",
- "transparent": true,
- "type": "text"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Metas building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 103,
+ "panels": [
{
"datasource": {
"type": "prometheus",
@@ -2975,6 +4169,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3015,11 +4210,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
+ "w": 9,
"x": 0,
- "y": 104
+ "y": 2505
},
- "id": 88,
+ "id": 107,
"options": {
"legend": {
"calcs": [ ],
@@ -3034,6 +4229,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3041,41 +4237,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Blocks",
"range": true,
"refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
}
],
- "title": "Tenant Tables",
+ "title": "Created Blocks",
"type": "timeseries"
},
{
@@ -3086,39 +4256,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "mode": "thresholds"
},
"mappings": [ ],
"thresholds": {
@@ -3135,25 +4273,30 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 104
+ "w": 3,
+ "x": 9,
+ "y": 2505
},
- "id": 89,
+ "id": 139,
"options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3161,48 +4304,25 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "time_series",
"hide": false,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "C"
+ "instant": true,
+ "legendFormat": "Blocks",
+ "range": false,
+ "refId": "A"
}
],
- "title": "Tenant Tables per pod",
- "type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 111
- },
- "id": 87,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Series per compaction (includes series copied from other blocks)",
- "transparent": true,
- "type": "text"
+ "title": "Created Blocks",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -3215,6 +4335,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3256,10 +4377,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 112
+ "x": 12,
+ "y": 2505
},
- "id": 81,
+ "id": 106,
"options": {
"legend": {
"calcs": [ ],
@@ -3274,6 +4395,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3281,41 +4403,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_blocks_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p50",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
"refId": "A"
}
],
- "title": "Series",
+ "title": "Deleted Blocks",
"type": "timeseries"
},
{
@@ -3323,6 +4419,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -3335,6 +4432,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3376,10 +4474,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 112
+ "x": 0,
+ "y": 2512
},
- "id": 82,
+ "id": 109,
"options": {
"legend": {
"calcs": [ ],
@@ -3394,6 +4492,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3401,47 +4500,125 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_series_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_series_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_reused_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Blocks",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "avg series per compaction by pod",
+ "title": "Blocks reused",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Blocks building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 135,
+ "panels": [
{
- "description": "",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Is the retention currently running?",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "fieldMinMax": false,
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "yellow",
+ "index": 0,
+ "text": "No"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "Yes"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "max": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [ ]
+ },
"gridPos": {
- "h": 1,
- "w": 24,
+ "h": 7,
+ "w": 3,
"x": 0,
- "y": 119
+ "y": 2573
},
- "id": 90,
+ "id": 136,
"options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "content": "",
- "mode": "markdown"
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Number of bytes from chunks added to blocks during each compaction.",
- "transparent": true,
- "type": "text"
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Running now?",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Is the retention currently running?",
"fieldConfig": {
"defaults": {
"color": {
@@ -3453,7 +4630,10 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMax": 1,
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3479,7 +4659,9 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
+ "max": 2,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -3489,17 +4671,17 @@
}
]
},
- "unit": "bytes"
+ "unit": "bool_yes_no"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 120
+ "w": 9,
+ "x": 3,
+ "y": 2573
},
- "id": 91,
+ "id": 137,
"options": {
"legend": {
"calcs": [ ],
@@ -3508,12 +4690,12 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3521,41 +4703,13 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
- "title": "Bytes",
+ "title": "Retention running",
"type": "timeseries"
},
{
@@ -3563,6 +4717,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "How much time applying retention took",
"fieldConfig": {
"defaults": {
"color": {
@@ -3575,6 +4730,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3600,6 +4756,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
"thresholds": {
"mode": "absolute",
@@ -3610,7 +4767,7 @@
}
]
},
- "unit": "bytes"
+ "unit": "dtdurations"
},
"overrides": [ ]
},
@@ -3618,9 +4775,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 120
+ "y": 2573
},
- "id": 92,
+ "id": 138,
"options": {
"legend": {
"calcs": [ ],
@@ -3629,12 +4786,12 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3642,20 +4799,18 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_bytes_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_bytes_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
+ "expr": "histogram_quantile(0.9, \n sum by (status, le) (\n rate(loki_bloomplanner_retention_time_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval])\n )\n)",
+ "legendFormat": "__auto",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "avg bytes per compaction by pod",
+ "title": "Retention time",
"type": "timeseries"
}
],
"targets": [ ],
- "title": "Data processed",
+ "title": "Retention",
"type": "row"
},
{
@@ -3666,7 +4821,7 @@
"x": 0,
"y": 29
},
- "id": 58,
+ "id": 62,
"panels": [
{
"description": "",
@@ -3675,23 +4830,52 @@
"overrides": [ ]
},
"gridPos": {
- "h": 3,
+ "h": 4,
"w": 24,
"x": 0,
- "y": 82
+ "y": 2581
+ },
+ "id": 71,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "During the planning phase, the planner downloads the metas and TSDBs to build the plan.\n\nOnce all blocks and metas are built, the builder flushes them to the object store.\n\nAfter each iteration, the planner deletes the metas and blocks marked for deletion in the tombstones.",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 2,
+ "x": 0,
+ "y": 2585
},
- "id": 47,
+ "id": 63,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "\nCompactors write blocks to the attached PVs before flushing them into the object store.\nIt also download chunks and index files.\n\nAfter compacting a given tenant, all the downloaded index files and chunks, as well as the already flushed blocks are deleted.",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "",
"transparent": true,
@@ -3702,6 +4886,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3714,8 +4899,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3736,35 +4922,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [ ],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.80000000000000004
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2585
},
- "id": 9,
+ "id": 61,
"options": {
"legend": {
"calcs": [ ],
@@ -3779,6 +4960,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3786,40 +4968,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Utilization",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -3827,6 +4984,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3839,8 +4997,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3861,35 +5020,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [ ],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.80000000000000004
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2585
},
- "id": 100,
+ "id": 64,
"options": {
"legend": {
"calcs": [ ],
@@ -3904,6 +5058,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3911,14 +5066,41 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"})",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Utilization per pod",
+ "title": "Latency Planner",
"type": "timeseries"
},
{
@@ -3926,6 +5108,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3938,8 +5121,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3968,25 +5152,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 93
+ "w": 11,
+ "x": 2,
+ "y": 2592
},
- "id": 7,
+ "id": 127,
"options": {
"legend": {
"calcs": [ ],
@@ -4001,6 +5182,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4008,40 +5190,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Writes",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4049,6 +5206,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4061,8 +5219,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4091,25 +5250,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 93
+ "w": 11,
+ "x": 13,
+ "y": 2592
},
- "id": 101,
+ "id": 128,
"options": {
"legend": {
"calcs": [ ],
@@ -4124,6 +5280,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4131,21 +5288,78 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p99",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p50",
"range": true,
- "refId": "A"
+ "refId": "F"
}
],
- "title": "Disk Writes per pod",
+ "title": "Latency Builder",
"type": "timeseries"
},
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 2,
+ "x": 0,
+ "y": 2598
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### S3\n",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4158,8 +5372,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4188,25 +5403,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2599
},
- "id": 8,
+ "id": 67,
"options": {
"legend": {
"calcs": [ ],
@@ -4221,6 +5433,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4228,40 +5441,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Reads",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4269,6 +5457,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4281,8 +5470,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4311,25 +5501,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2599
},
- "id": 102,
+ "id": 69,
"options": {
"legend": {
"calcs": [ ],
@@ -4344,6 +5531,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4351,88 +5539,42 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Reads per pod",
+ "title": "Latency Planner",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Disk Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 62,
- "panels": [
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 3,
- "w": 24,
- "x": 0,
- "y": 83
- },
- "id": 71,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "Once all blocks and metas are built locally, the compactor flushes them to the object store.\n\nAfter each iteration, the compactor deletes the metas and blocks marked for deletion in the tombstones.",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
- },
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 86
- },
- "id": 63,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
},
{
"datasource": {
@@ -4452,6 +5594,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4482,7 +5625,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4494,9 +5638,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 86
+ "y": 2606
},
- "id": 61,
+ "id": 129,
"options": {
"legend": {
"calcs": [ ],
@@ -4511,6 +5655,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4518,7 +5663,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4526,7 +5671,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4547,6 +5692,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4577,7 +5723,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4589,9 +5736,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 86
+ "y": 2606
},
- "id": 64,
+ "id": 130,
"options": {
"legend": {
"calcs": [ ],
@@ -4606,6 +5753,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4613,7 +5761,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4626,7 +5774,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4639,7 +5787,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4647,7 +5795,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
},
{
@@ -4660,20 +5808,20 @@
"h": 7,
"w": 2,
"x": 0,
- "y": 93
+ "y": 2612
},
- "id": 65,
+ "id": 66,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### Azure\nBlob Storage",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "",
"transparent": true,
@@ -4697,6 +5845,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4727,7 +5876,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4739,9 +5889,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 93
+ "y": 2613
},
- "id": 67,
+ "id": 68,
"options": {
"legend": {
"calcs": [ ],
@@ -4756,6 +5906,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4763,7 +5914,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4771,7 +5922,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4792,6 +5943,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4822,7 +5974,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4834,9 +5987,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 93
+ "y": 2613
},
- "id": 69,
+ "id": 70,
"options": {
"legend": {
"calcs": [ ],
@@ -4851,6 +6004,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4858,7 +6012,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4871,7 +6025,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4884,7 +6038,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4892,38 +6046,9 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Planner",
"type": "timeseries"
},
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 100
- },
- "id": 66,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### Azure\nBlob Storage",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -4942,6 +6067,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4972,7 +6098,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4984,9 +6111,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 100
+ "y": 2620
},
- "id": 68,
+ "id": 131,
"options": {
"legend": {
"calcs": [ ],
@@ -5001,6 +6128,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -5008,7 +6136,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -5016,7 +6144,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -5037,6 +6165,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -5067,7 +6196,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -5079,9 +6209,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 100
+ "y": 2620
},
- "id": 70,
+ "id": 132,
"options": {
"legend": {
"calcs": [ ],
@@ -5096,6 +6226,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -5103,7 +6234,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -5116,7 +6247,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -5129,7 +6260,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -5137,7 +6268,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
}
],
@@ -5146,6 +6277,7 @@
"type": "row"
}
],
+ "preload": false,
"refresh": "10s",
"rows": [ ],
"schemaVersion": 14,
@@ -5224,6 +6356,26 @@
"refresh": 1,
"regex": "",
"type": "datasource"
+ },
+ {
+ "allValue": ".+",
+ "current": { },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Tenant",
+ "multi": false,
+ "name": "tenant",
+ "options": [ ],
+ "query": "label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", namespace=\"$namespace\"}, tenant)",
+ "refresh": 0,
+ "regex": "",
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
}
]
},
@@ -5231,7 +6383,6 @@
"from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {
"refresh_intervals": [
"5s",
@@ -5258,8 +6409,8 @@
]
},
"timezone": "utc",
- "title": "Loki / Bloom Compactor",
- "uid": "bloom-compactor",
+ "title": "Loki / Bloom Build",
+ "uid": "bloom-build",
"version": 0,
"weekStart": ""
}
\ No newline at end of file
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-gateway.json b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-gateway.json
index 2d5e16a9d7e0f..0deb5e33b5d7e 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-gateway.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-gateway.json
@@ -21,7 +21,6 @@
"type": "dashboards"
}
],
- "liveNow": false,
"panels": [
{
"collapsed": false,
@@ -46,8 +45,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "thresholds",
- "seriesBy": "last"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -56,9 +54,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
- "gradientMode": "none",
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -69,7 +68,7 @@
"lineStyle": {
"fill": "solid"
},
- "lineWidth": 2,
+ "lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
@@ -114,7 +113,7 @@
},
"gridPos": {
"h": 6,
- "w": 6,
+ "w": 12,
"x": 0,
"y": 1
},
@@ -133,6 +132,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -140,7 +140,8 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
"legendFormat": "Chunks",
"range": true,
@@ -153,7 +154,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "Series",
@@ -161,7 +162,7 @@
"refId": "B"
}
],
- "title": "Filter ratio",
+ "title": "Filter ratio - Bloom Gateway (server)",
"type": "timeseries"
},
{
@@ -206,7 +207,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 6,
+ "x": 12,
"y": 1
},
"id": 75,
@@ -226,7 +227,7 @@
"sizing": "auto"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -235,7 +236,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"instant": true,
"legendFormat": "Chunks",
"range": false,
@@ -248,40 +249,12 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"hide": false,
"instant": true,
"legendFormat": "Series",
"range": false,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Chunks avg",
- "range": false,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Series avg",
- "range": false,
- "refId": "D"
}
],
"title": "Filter ratio",
@@ -305,6 +278,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -381,7 +355,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 12,
+ "x": 18,
"y": 1
},
"id": 72,
@@ -399,6 +373,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -413,19 +388,6 @@
"range": true,
"refId": "D"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(kube_pod_container_status_ready{container=\"bloom-gateway\", cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Running",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -438,19 +400,6 @@
"legendFormat": "Desired",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "kube_statefulset_status_replicas_available{cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"bloom-gateway\"}",
- "hide": true,
- "instant": false,
- "legendFormat": "Available",
- "range": true,
- "refId": "C"
}
],
"title": "Readiness",
@@ -461,11 +410,11 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Percentage of chunks that are filtered by using bloom filters",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -474,9 +423,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 50,
- "gradientMode": "none",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -499,34 +449,44 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "off"
+ "mode": "area"
}
},
"mappings": [ ],
+ "max": 1,
+ "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "red",
"value": null
},
{
- "color": "red",
- "value": 80
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
}
]
},
- "unit": "none"
+ "unit": "percentunit"
},
"overrides": [ ]
},
"gridPos": {
"h": 6,
- "w": 6,
- "x": 18,
- "y": 1
+ "w": 12,
+ "x": 0,
+ "y": 7
},
- "id": 37,
+ "id": 93,
"options": {
"legend": {
"calcs": [ ],
@@ -541,6 +501,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -548,77 +509,135 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "Chunks",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "interval": "",
- "legendFormat": "{{pod}} ({{reason}})",
+ "legendFormat": "Series",
"range": true,
- "refId": "C"
+ "refId": "B"
}
],
- "title": "Container restarts",
+ "title": "Filter ratio - Index Gateway (client)",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
},
"gridPos": {
- "h": 9,
- "w": 15,
- "x": 0,
+ "h": 6,
+ "w": 6,
+ "x": 12,
"y": 7
},
- "id": 48,
+ "id": 94,
"options": {
- "dedupStrategy": "none",
- "enableLogDetails": true,
- "prettifyLogMessage": false,
- "showCommonLabels": false,
- "showLabels": false,
- "showTime": false,
- "sortOrder": "Descending",
- "wrapLogMessage": true
+ "minVizHeight": 75,
+ "minVizWidth": 75,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true,
+ "sizing": "auto"
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "instant": true,
+ "legendFormat": "Chunks",
+ "range": false,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
- "hide": true,
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "hide": false,
+ "instant": true,
+ "legendFormat": "Series",
+ "range": false,
"refId": "B"
}
],
- "title": "Errors",
- "type": "logs"
+ "title": "Filter ratio",
+ "type": "gauge"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
- "fixedColor": "red",
- "mode": "fixed"
+ "mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
@@ -627,8 +646,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "bars",
- "fillOpacity": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -637,11 +657,13 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "symlog"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -663,66 +685,21 @@
},
{
"color": "red",
- "value": 1
- }
- ]
- }
- },
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "warn"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "orange",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "error"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "red",
- "mode": "fixed"
- }
+ "value": 80
}
]
},
- {
- "matcher": {
- "id": "byName",
- "options": "panic"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "semi-dark-red",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
+ "unit": "none"
+ },
+ "overrides": [ ]
},
"gridPos": {
- "h": 9,
- "w": 9,
- "x": 15,
+ "h": 6,
+ "w": 6,
+ "x": 18,
"y": 7
},
- "id": 52,
+ "id": 37,
"options": {
"legend": {
"calcs": [ ],
@@ -737,42 +714,935 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
- "legendFormat": "{{ level }}",
- "queryType": "range",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
"hide": false,
- "legendFormat": "panic",
- "queryType": "range",
- "refId": "B"
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}} ({{reason}})",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Errors Rate",
+ "title": "Container restarts",
"type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 16
- },
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "id": 99,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "(\n sum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n -\n sum(rate(loki_index_gateway_postfilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n)\n/\nsum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)",
+ "instant": false,
+ "legendFormat": "chunks {{ route}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Filter ratio - Index Gateway by route",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "#EAB839",
+ "value": 0.10000000000000001
+ },
+ {
+ "color": "#EF843C",
+ "value": 0.25
+ },
+ {
+ "color": "red",
+ "value": 0.5
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "id": 100,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "series",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "chunks",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Data skipped because they don't match any blocks",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 96,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 447
+ },
+ "id": 97,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Found/Skipped/Missing chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 447
+ },
+ "id": 98,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 454
+ },
+ "id": 107,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Found/Skipped/Missing series",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 454
+ },
+ "id": 108,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered series",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [ ],
+ "title": "Bloom Recorder",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 95,
+ "panels": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 15,
+ "x": 0,
+ "y": 920
+ },
+ "id": 48,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "symlog"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "warn"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "panic"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "semi-dark-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 9,
+ "x": 15,
+ "y": 920
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors Rate",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [ ],
+ "title": "Logs",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
"id": 56,
"panels": [
{
@@ -792,6 +1662,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -822,8 +1693,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -838,7 +1708,7 @@
"h": 14,
"w": 12,
"x": 0,
- "y": 17
+ "y": 1764
},
"id": 10,
"options": {
@@ -855,6 +1725,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -915,6 +1786,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -945,8 +1817,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -962,7 +1833,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 17
+ "y": 1764
},
"id": 11,
"options": {
@@ -979,6 +1850,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1040,6 +1912,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1070,8 +1943,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1087,7 +1959,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 24
+ "y": 2140
},
"id": 81,
"options": {
@@ -1104,6 +1976,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1179,6 +2052,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1212,8 +2086,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1228,7 +2101,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 31
+ "y": 2147
},
"id": 87,
"options": {
@@ -1245,6 +2118,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1281,6 +2155,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1314,8 +2189,110 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 2147
+ },
+ "id": 88,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GC duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
{
"color": "red",
@@ -1330,10 +2307,10 @@
"gridPos": {
"h": 7,
"w": 8,
- "x": 8,
- "y": 31
+ "x": 16,
+ "y": 2147
},
- "id": 88,
+ "id": 89,
"options": {
"legend": {
"calcs": [ ],
@@ -1348,6 +2325,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1355,14 +2333,26 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
}
],
- "title": "GC duration",
+ "title": "GC pauses",
"type": "timeseries"
},
{
@@ -1383,6 +2373,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1393,9 +2384,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1416,8 +2404,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1425,17 +2412,17 @@
}
]
},
- "unit": "s"
+ "unit": "binBps"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 8,
- "x": 16,
- "y": 31
+ "w": 12,
+ "x": 0,
+ "y": 2154
},
- "id": 89,
+ "id": 84,
"options": {
"legend": {
"calcs": [ ],
@@ -1450,6 +2437,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1457,30 +2445,21 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
- "legendFormat": "__auto",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}}",
"range": true,
- "refId": "B"
+ "refId": "D"
}
],
- "title": "GC pauses",
+ "title": "Disk reads",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1497,6 +2476,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1527,8 +2507,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1543,10 +2522,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 38
+ "x": 12,
+ "y": 2154
},
- "id": 84,
+ "id": 85,
"options": {
"legend": {
"calcs": [ ],
@@ -1561,6 +2540,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1568,20 +2548,20 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
"instant": false,
- "interval": "",
"legendFormat": "{{pod}}",
"range": true,
"refId": "D"
}
],
- "title": "Disk reads",
+ "title": "Disk writes",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1598,6 +2578,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1617,7 +2598,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -1628,8 +2609,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1643,11 +2623,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 38
+ "w": 24,
+ "x": 0,
+ "y": 2161
},
- "id": 85,
+ "id": 102,
"options": {
"legend": {
"calcs": [ ],
@@ -1662,6 +2642,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1669,15 +2650,29 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum(sum by (instance) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by (instance) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "interval": "",
+ "legendFormat": "Reads",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(sum by(instance) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by(instance) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)) * -1",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Writes",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Disk writes",
+ "title": "Disk reads/writes",
"type": "timeseries"
}
],
@@ -1691,7 +2686,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 17
+ "y": 22
},
"id": 2,
"panels": [
@@ -1712,6 +2707,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -1742,8 +2738,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1799,7 +2794,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 18
+ "y": 1175
},
"id": 13,
"options": {
@@ -1816,6 +2811,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1823,7 +2819,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
+ "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\",job=\"$namespace/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -1851,6 +2847,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
@@ -1881,8 +2878,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1938,7 +2934,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 18
+ "y": 1175
},
"id": 86,
"options": {
@@ -1955,6 +2951,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1975,6 +2972,7 @@
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1991,6 +2989,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2021,8 +3020,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2034,7 +3032,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 25
+ "y": 1249
},
"id": 14,
"options": {
@@ -2051,6 +3049,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2090,6 +3089,19 @@
"legendFormat": "{{ route }} 99th percentile",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"/logproto.BloomGateway/FilterChunkRefs\"}))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{ route }} max",
+ "range": true,
+ "refId": "A"
}
],
"title": "Latency",
@@ -2112,6 +3124,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2142,8 +3155,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2155,7 +3167,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 25
+ "y": 1249
},
"id": 15,
"options": {
@@ -2172,6 +3184,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2201,7 +3214,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 18
+ "y": 23
},
"id": 58,
"panels": [
@@ -2223,6 +3236,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -2253,7 +3267,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2265,7 +3280,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 11
+ "y": 1176
},
"id": 16,
"options": {
@@ -2282,33 +3297,8 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (loki_bloom_gateway_queue_duration_seconds_sum{cluster=\"$cluster\", namespace=\"$namespace\"})\n/\nsum by (pod) (loki_bloom_gateway_queue_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"})\n",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum (loki_bloom_gateway_queue_length{cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Total",
- "range": true,
- "refId": "D"
- },
{
"datasource": {
"type": "prometheus",
@@ -2344,6 +3334,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2374,7 +3365,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2386,7 +3378,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 11
+ "y": 1176
},
"id": 17,
"options": {
@@ -2403,6 +3395,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2417,19 +3410,6 @@
"range": true,
"refId": "E"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (le) (rate(loki_bloom_gateway_queue_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))",
- "hide": true,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -2478,6 +3458,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2508,7 +3489,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2520,7 +3502,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 11
+ "y": 1176
},
"id": 22,
"options": {
@@ -2537,6 +3519,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2566,7 +3549,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 19
+ "y": 24
},
"id": 68,
"panels": [
@@ -2588,6 +3571,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2634,7 +3618,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 12
+ "y": 1177
},
"id": 69,
"options": {
@@ -2651,6 +3635,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2712,6 +3697,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2758,7 +3744,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 12
+ "y": 1177
},
"id": 70,
"options": {
@@ -2775,6 +3761,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2836,6 +3823,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2881,7 +3869,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 12
+ "y": 1177
},
"id": 71,
"options": {
@@ -2898,6 +3886,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2917,15 +3906,115 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "processed {{status}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Tasks dequeued/processed",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 1214
+ },
+ "id": 105,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_gateway_process_duration_seconds_count{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval])) by (status)",
"instant": false,
- "legendFormat": "processed {{status}}",
+ "legendFormat": "{{status}}",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "Tasks dequeued/processed",
+ "title": "Worker Iterations per second",
"type": "timeseries"
}
],
@@ -2939,21 +4028,21 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 20
+ "y": 25
},
"id": 59,
"panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 13
+ "y": 1178
},
"id": 19,
"options": {
@@ -2966,7 +4055,7 @@
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "We cache bloom blocks in memory to prevent the gateway from hitting the object store too often",
"transparent": true,
@@ -2990,6 +4079,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3036,7 +4126,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 14
+ "y": 1179
},
"id": 20,
"options": {
@@ -3053,6 +4143,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3102,6 +4193,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3194,7 +4286,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 14
+ "y": 1179
},
"id": 83,
"options": {
@@ -3211,6 +4303,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3286,6 +4379,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -3364,7 +4458,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 21
+ "y": 1186
},
"id": 92,
"options": {
@@ -3381,6 +4475,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3388,7 +4483,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)",
+ "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -3417,6 +4512,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3476,7 +4572,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 28
+ "y": 1193
},
"id": 76,
"options": {
@@ -3493,6 +4589,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3541,6 +4638,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3599,255 +4697,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 28
- },
- "id": 21,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "metas fetch rate",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "blocks fetch rate",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 blocks size",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 metas size",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "G"
- }
- ],
- "title": "Bloom Store",
- "type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Blocks Cache",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 21
- },
- "id": 60,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 14
- },
- "id": 61,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "The gateway download bloom meta files and blocks from the object store.",
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 15
- },
- "id": 24,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 25,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- }
- ]
- },
- "unit": "none"
- },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 11,
- "x": 2,
- "y": 15
+ "x": 12,
+ "y": 1193
},
- "id": 25,
+ "id": 21,
"options": {
"legend": {
"calcs": [ ],
@@ -3862,6 +4715,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3869,14 +4723,93 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{operation}} {{status_code}}",
+ "legendFormat": "metas fetch rate",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "blocks fetch rate",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 blocks size",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 metas size",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "G"
}
],
- "title": "QPS",
+ "title": "Bloom Store",
"type": "timeseries"
},
{
@@ -3897,6 +4830,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3907,6 +4841,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -3922,6 +4859,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
"thresholds": {
"mode": "absolute",
@@ -3931,17 +4869,30 @@
}
]
},
- "unit": "none"
+ "unit": "short"
},
- "overrides": [ ]
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/Size (.*)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
- "w": 11,
- "x": 13,
- "y": 15
+ "w": 12,
+ "x": 0,
+ "y": 1200
},
- "id": 29,
+ "id": 101,
"options": {
"legend": {
"calcs": [ ],
@@ -3956,6 +4907,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3963,67 +4915,88 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{operation}} p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "sum by (job)(rate(loki_bloom_store_download_queue_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{operation}} p90",
+ "interval": "",
+ "legendFormat": "Size",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "hide": false,
- "instant": false,
- "legendFormat": "{{operation}} p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Latency",
+ "title": "Block download queue size",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Blocks Cache",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 60,
+ "panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 1013
+ },
+ "id": 61,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "",
+ "mode": "markdown"
},
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [ ],
+ "title": "The gateway download bloom meta files and blocks from the object store.",
+ "transparent": true,
+ "type": "text"
+ },
+ {
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 22
+ "y": 1014
},
- "id": 62,
+ "id": 24,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [ ],
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -4045,6 +5018,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4075,7 +5049,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4087,9 +5062,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 22
+ "y": 1014
},
- "id": 63,
+ "id": 25,
"options": {
"legend": {
"calcs": [ ],
@@ -4104,6 +5079,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4111,7 +5087,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4139,8 +5115,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 25,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4158,7 +5135,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -4169,7 +5146,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4181,9 +5159,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 22
+ "y": 1014
},
- "id": 64,
+ "id": 29,
"options": {
"legend": {
"calcs": [ ],
@@ -4198,6 +5176,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4205,7 +5184,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4217,7 +5196,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4230,7 +5209,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4242,30 +5221,31 @@
"type": "timeseries"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 29
+ "y": 1021
},
- "id": 65,
+ "id": 62,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### Azure\nBlob Storage\n\n",
+ "content": "---\n#### S3\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [ ],
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -4287,6 +5267,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4317,7 +5298,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4329,9 +5311,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 29
+ "y": 1021
},
- "id": 66,
+ "id": 63,
"options": {
"legend": {
"calcs": [ ],
@@ -4346,6 +5328,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4353,7 +5336,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4381,6 +5364,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4411,7 +5395,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4423,9 +5408,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 29
+ "y": 1021
},
- "id": 67,
+ "id": 64,
"options": {
"legend": {
"calcs": [ ],
@@ -4440,6 +5425,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4447,7 +5433,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4459,7 +5445,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4472,7 +5458,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4482,26 +5468,40 @@
],
"title": "Latency",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Object Store",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 22
- },
- "id": 77,
- "panels": [
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 2,
+ "x": 0,
+ "y": 1028
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### Azure\nBlob Storage\n\n",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4516,8 +5516,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4526,9 +5527,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4538,7 +5536,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4549,24 +5547,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 1028
},
- "id": 78,
+ "id": 66,
"options": {
"legend": {
"calcs": [ ],
@@ -4581,26 +5577,28 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "topk(3, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [5s])))",
- "legendFormat": "{{tasks}}",
- "queryType": "range",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{operation}} {{status_code}}",
+ "range": true,
"refId": "A"
}
],
- "title": "Process tasks with bounds",
+ "title": "QPS",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4615,8 +5613,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4625,9 +5624,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4637,7 +5633,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4648,50 +5644,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "max",
- "avg"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 1028
},
- "id": 79,
+ "id": 67,
"options": {
"legend": {
"calcs": [ ],
@@ -4706,91 +5674,81 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max(max_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
- "hide": false,
- "legendFormat": "max",
- "queryType": "range",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{operation}} p99",
+ "range": true,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
"refId": "B"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"request unavailable blocks in the background\" | logfmt | missing > 0 | unwrap missing [$__auto]))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg missing",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
"refId": "C"
}
],
- "title": "Download enqueue duration",
+ "title": "Latency",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Object Store",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 77,
+ "panels": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
- "color": {
- "fixedColor": "green",
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "fillOpacity": 80,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "lineWidth": 1,
"scaleDistribution": {
"type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 100
- }
- ]
}
},
"overrides": [ ]
@@ -4799,145 +5757,127 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 31
+ "y": 1044
},
"id": 80,
"options": {
- "barRadius": 0,
- "barWidth": 0.96999999999999997,
- "fullHighlight": false,
- "groupWidth": 0.69999999999999996,
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1.0000000000000001e-09
+ },
"legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
- "orientation": "horizontal",
- "showValue": "auto",
- "stacking": "none",
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
},
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
+ }
},
"panels": [ ],
- "pluginVersion": "11.0.0-67814",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sort_desc(topk(10, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [$__auto]))))",
- "legendFormat": "",
- "queryType": "instant",
+ "exemplar": false,
+ "expr": "increase(loki_bloom_gateway_dequeue_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
"refId": "A"
}
],
- "title": "Tasks multiplexed",
- "type": "barchart"
+ "title": "Dequeue duration",
+ "type": "heatmap"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
- "color": {
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
- "lineWidth": 1,
- "pointSize": 5,
"scaleDistribution": {
"type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
- }
- ]
}
},
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "Enqueue latency"
- },
- "properties": [
- {
- "id": "unit",
- "value": "s"
- }
- ]
- }
- ]
+ "overrides": [ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 31
+ "y": 1044
},
- "id": 82,
+ "id": 106,
"options": {
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1.0000000000000001e-09
+ },
"legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4945,27 +5885,17 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_enqueue_time_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "Enqueue latency",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "increase(loki_bloom_gateway_tasks_dequeued_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
"hide": false,
- "legendFormat": "Size",
+ "instant": false,
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "Block download queue",
- "type": "timeseries"
+ "title": "Dequeue count",
+ "type": "heatmap"
}
],
"targets": [ ],
@@ -4973,6 +5903,7 @@
"type": "row"
}
],
+ "preload": false,
"refresh": "10s",
"rows": [ ],
"schemaVersion": 14,
@@ -5058,7 +5989,6 @@
"from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {
"refresh_intervals": [
"5s",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
index cec461bff5117..316a43298b300 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
@@ -495,7 +495,7 @@
"span": 6,
"targets": [
{
- "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
+ "expr": "sum(loki_compactor_locked_table_successive_compaction_skips{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"legendFormat": "{{table_name}}",
"legendLink": null
diff --git a/production/loki-mixin-compiled/dashboards/loki-bloom-compactor.json b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json
similarity index 70%
rename from production/loki-mixin-compiled/dashboards/loki-bloom-compactor.json
rename to production/loki-mixin-compiled/dashboards/loki-bloom-build.json
index c365fab0a7e59..149dfacd857d3 100644
--- a/production/loki-mixin-compiled/dashboards/loki-bloom-compactor.json
+++ b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json
@@ -36,29 +36,6 @@
"title": "Overview",
"type": "row"
},
- {
- "gridPos": {
- "h": 8,
- "w": 14,
- "x": 0,
- "y": 1
- },
- "id": 35,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "## About the Bloom Compactor\nThe compactor iterates through chunks and creates blooms out of them.\nThe size of the resulting blooms depends on the bloom filter settings, the tokenizer settings, the number of ring tokens per compactor and the total number opf compactors.\n\nCompactors are horizontally scalable and uses a ring to:\n- Shard tenants\n- Shard series fingerprints within a tenant subring.\n\nThe blooms for the series are grouped together in blocks which are flushed to object store.",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -77,6 +54,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 15,
"gradientMode": "none",
@@ -121,13 +99,30 @@
},
"unit": "percentunit"
},
- "overrides": [ ]
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "none"
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 9
+ "y": 1
},
"id": 42,
"options": {
@@ -144,7 +139,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -152,41 +147,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"})\n/\nsum(count(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}))",
- "hide": false,
- "instant": false,
- "legendFormat": "avg",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.9, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.1, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum(loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "p10",
+ "legendFormat": "Progress",
"range": true,
- "refId": "C"
+ "refId": "D"
}
],
- "title": "Progress",
+ "title": "Overall progress",
"type": "timeseries"
},
{
@@ -194,7 +163,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Uncompressed size of chunks in a series VS the size of the blooms built.",
+ "description": "Cell-wide compaction progress. Should increase till completion throughout each compaction period.",
"fieldConfig": {
"defaults": {
"color": {
@@ -207,8 +176,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -242,33 +212,29 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
},
- "unit": "bytes"
+ "unit": "percentunit"
},
"overrides": [
{
"matcher": {
- "id": "byName",
- "options": "Ratio"
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
},
"properties": [
{
"id": "unit",
- "value": "percentunit"
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "Ratio over range"
- },
- "properties": [
+ "value": "none"
+ },
{
- "id": "unit",
- "value": "percentunit"
+ "id": "custom.fillOpacity",
+ "value": 0
}
]
}
@@ -278,9 +244,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 9
+ "y": 1
},
- "id": 41,
+ "id": 116,
"options": {
"legend": {
"calcs": [ ],
@@ -295,7 +261,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -303,54 +269,158 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
+ "expr": "sum by (tenant) (loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum by (tenant) (loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "Bloom",
+ "legendFormat": "{{tenant}}",
"range": true,
- "refId": "A"
- },
+ "refId": "D"
+ }
+ ],
+ "title": "Progress by tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "description": "Blooms size vs uncompressed chunk size.",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 17,
+ "x": 0,
+ "y": 8
+ },
+ "id": 51,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Chunk",
- "range": true,
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\"",
+ "queryType": "range",
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ }
+ ],
+ "title": "Errors Planner",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
},
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio",
- "range": true,
- "refId": "C"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 3,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 7,
+ "x": 17,
+ "y": 8
+ },
+ "id": 53,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio over range",
- "range": true,
- "refId": "D"
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\" [$__auto]))",
+ "legendFormat": "Error rate",
+ "queryType": "range",
+ "refId": "A"
}
],
- "title": "Chunks and Bloom size",
+ "title": "Errors Rate Planner",
"type": "timeseries"
},
{
@@ -359,13 +429,17 @@
"uid": "${loki_datasource}"
},
"description": "Blooms size vs uncompressed chunk size.",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 17,
"x": 0,
- "y": 16
+ "y": 15
},
- "id": 51,
+ "id": 133,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
@@ -377,7 +451,7 @@
"wrapLogMessage": false
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -385,12 +459,12 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\"",
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\"",
"queryType": "range",
"refId": "B"
}
],
- "title": "Errors",
+ "title": "Errors Builder",
"type": "logs"
},
{
@@ -411,6 +485,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
@@ -457,9 +532,9 @@
"h": 7,
"w": 7,
"x": 17,
- "y": 16
+ "y": 15
},
- "id": 53,
+ "id": 134,
"options": {
"legend": {
"calcs": [ ],
@@ -474,7 +549,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -482,12 +557,13 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\" [$__auto]))",
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\" [$__auto]))",
+ "legendFormat": "Error rate",
"queryType": "range",
"refId": "A"
}
],
- "title": "Errors Rate",
+ "title": "Errors Rate Builder",
"type": "timeseries"
},
{
@@ -496,7 +572,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 23
+ "y": 22
},
"id": 112,
"panels": [
@@ -517,9 +593,11 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -551,7 +629,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -559,47 +638,1093 @@
}
]
},
- "unit": "percentunit"
- },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 24
- },
- "id": 114,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "unit": "none"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 64
+ },
+ "id": 125,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "loki_bloomplanner_inflight_tasks{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", quantile=\"0.95\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "inflight p95",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Connected builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "IDLE Builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Builders processing task"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 64
+ },
+ "id": 126,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_connected_builders{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Connected builders",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Builders processing task",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} == 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "IDLE Builders",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks per builder",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 72
+ },
+ "id": 81,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.75, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p75",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Series per task (includes series copied from other blocks)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 72
+ },
+ "id": 91,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes processed per task\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes processed per task\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes processed per task\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Number of bytes from chunks added to blocks during each compaction.",
+ "type": "timeseries"
+ },
+ {
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 24,
+ "x": 0,
+ "y": 79
+ },
+ "id": 117,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "Identify the tenant using the **_Progress by tenant_** panel from the overview and set the tenant variable",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "Tip",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 81
+ },
+ "id": 114,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", user=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Tasks per tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 81
+ },
+ "id": 115,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"}\n|= \"level=error\"\n|= \"tenant=$tenant\"",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Tenant errors",
+ "type": "logs"
+ }
+ ],
+ "targets": [ ],
+ "title": "Tasks",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 95,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "How many tokens each builder is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "log"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 90
+ },
+ "id": 96,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Per core",
+ "range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Total",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Progress per pod",
+ "title": "Tokens rate",
"type": "timeseries"
},
{
@@ -607,23 +1732,56 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or `true` (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
"fieldConfig": {
"defaults": {
- "fieldMinMax": false,
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
"mappings": [ ],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "#EAB839",
- "value": 0
+ "color": "green",
+ "value": null
},
{
- "color": "green",
- "value": 100
+ "color": "red",
+ "value": 80
}
]
},
@@ -635,26 +1793,24 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 24
+ "y": 90
},
- "id": 115,
+ "id": 97,
"options": {
- "minVizHeight": 75,
- "minVizWidth": 75,
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "showThresholdLabels": false,
- "showThresholdMarkers": false,
- "sizing": "auto"
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
},
"panels": [ ],
- "pluginVersion": "11.0.0-68102",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -662,7 +1818,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
+ "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -670,55 +1826,15 @@
"refId": "B"
}
],
- "title": "Current Progress per pod",
- "type": "gauge"
- }
- ],
- "targets": [ ],
- "title": "Progress per pod",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 24
- },
- "id": 56,
- "panels": [
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 85,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
+ "title": "tokens/s by collision type",
+ "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Shows the expected number of cpu cores we need to provision to build blooms as fast as we ingest data so a compaction iteration doesn't take longer than the compaction interval.\n\nWe may decide to have more to speed up compaction.",
+ "description": "The sizes of the blooms created by the builder. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
"fieldConfig": {
"defaults": {
"color": {
@@ -731,6 +1847,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -744,7 +1861,8 @@
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "type": "linear"
+ "log": 2,
+ "type": "log"
},
"showPoints": "auto",
"spanNulls": false,
@@ -763,19 +1881,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 26
+ "y": 98
},
- "id": 94,
+ "id": 98,
"options": {
"legend": {
"calcs": [ ],
@@ -790,7 +1913,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -798,12 +1921,12 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n)",
+ "expr": "histogram_quantile(\n 1.0,\n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Needed",
+ "legendFormat": "max",
"range": true,
- "refId": "B"
+ "refId": "D"
},
{
"datasource": {
@@ -811,15 +1934,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Available",
+ "legendFormat": "p99",
"range": true,
- "refId": "A"
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Required CPUs to not lag behind",
+ "title": "Bloom size",
"type": "timeseries"
},
{
@@ -827,7 +1963,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "How many chunks we are indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -840,8 +1976,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -850,9 +1987,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -882,17 +2016,17 @@
}
]
},
- "unit": "Bps"
+ "unit": "short"
},
"overrides": [ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 26
+ "y": 98
},
- "id": 72,
+ "id": 99,
"options": {
"legend": {
"calcs": [ ],
@@ -907,7 +2041,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -915,35 +2049,38 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval])) by (pod)\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (pod)",
- "hide": true,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])) by (type)",
"hide": false,
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "MB/s per core",
+ "title": "Chunks indexed",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Bloom building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 56,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Shows the expected number of cpu cores we need to provision to build blooms as fast as we ingest data so a build iteration doesn't take longer than the build interval.\n\nWe may decide to have more to speed up building blooms.",
"fieldConfig": {
"defaults": {
"color": {
@@ -956,6 +2093,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -998,9 +2136,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 33
+ "y": 2030
},
- "id": 1,
+ "id": 94,
"options": {
"legend": {
"calcs": [ ],
@@ -1015,7 +2153,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1023,10 +2161,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n)",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Needed",
"range": true,
"refId": "B"
},
@@ -1036,22 +2174,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "instant": false,
- "legendFormat": "p99",
+ "legendFormat": "Available",
"range": true,
"refId": "A"
},
@@ -1061,28 +2187,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)\n*\ncount(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Avg",
+ "legendFormat": "Provisioned",
"range": true,
- "refId": "E"
+ "refId": "C"
}
],
- "title": "CPU",
+ "title": "Required CPUs to not lag behind",
"type": "timeseries"
},
{
@@ -1090,6 +2203,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1102,8 +2216,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1112,6 +2227,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1140,7 +2258,8 @@
"value": 80
}
]
- }
+ },
+ "unit": "Bps"
},
"overrides": [ ]
},
@@ -1148,9 +2267,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 33
+ "y": 2030
},
- "id": 75,
+ "id": 72,
"options": {
"legend": {
"calcs": [ ],
@@ -1165,7 +2284,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1173,40 +2292,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Total",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
- "hide": false,
- "instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
}
],
- "title": "CPU per pod",
+ "title": "MB/s per core",
"type": "timeseries"
},
{
@@ -1226,6 +2320,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1258,14 +2353,9 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "bytes"
+ }
},
"overrides": [ ]
},
@@ -1273,9 +2363,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 2037
},
- "id": 76,
+ "id": 1,
"options": {
"legend": {
"calcs": [ ],
@@ -1290,7 +2380,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1298,7 +2388,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1311,7 +2401,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1324,7 +2414,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"instant": false,
"legendFormat": "p99",
"range": true,
@@ -1336,7 +2426,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "p50",
@@ -1349,15 +2439,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "Avg",
"range": true,
"refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "max(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Max",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Memory (workingset)",
+ "title": "CPU",
"type": "timeseries"
},
{
@@ -1377,6 +2480,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1415,43 +2519,17 @@
"value": 80
}
]
- },
- "unit": "bytes"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "bloom-compactor-106"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
}
- ]
+ },
+ "overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 2037
},
- "id": 5,
+ "id": 75,
"options": {
"legend": {
"calcs": [ ],
@@ -1466,7 +2544,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1474,7 +2552,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"})",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "{{pod}}",
"range": true,
@@ -1486,7 +2564,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1499,7 +2577,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1507,7 +2585,7 @@
"refId": "C"
}
],
- "title": "Memory per pod (workingset)",
+ "title": "CPU per pod",
"type": "timeseries"
},
{
@@ -1515,7 +2593,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1528,8 +2605,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1538,9 +2616,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1570,7 +2645,7 @@
}
]
},
- "unit": "none"
+ "unit": "bytes"
},
"overrides": [ ]
},
@@ -1578,9 +2653,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 47
+ "y": 2044
},
- "id": 27,
+ "id": 76,
"options": {
"legend": {
"calcs": [ ],
@@ -1595,7 +2670,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1603,140 +2678,86 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n) > 0",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
+ "hide": false,
"instant": false,
- "legendFormat": "Restarts",
+ "legendFormat": "Request",
"range": true,
- "refId": "A"
- }
- ],
- "title": "Container restarts",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 15,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
},
- "unit": "none"
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
},
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 47
- },
- "id": 77,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69868",
- "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}\n )\n) > 0",
+ "expr": "max (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
"instant": false,
- "legendFormat": "{{reason}} / {{pod}}",
+ "legendFormat": "Max",
"range": true,
- "refId": "A"
+ "refId": "F"
}
],
- "title": "Container restarts reason per pod",
+ "title": "Memory (workingset)",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Resource Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 95,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many tokens each compactor is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -1749,6 +2770,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1762,8 +2784,7 @@
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "log"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -1788,17 +2809,18 @@
"value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 0,
- "y": 55
+ "x": 12,
+ "y": 2044
},
- "id": 96,
+ "id": 5,
"options": {
"legend": {
"calcs": [ ],
@@ -1813,7 +2835,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1821,10 +2843,22 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens checked per pod, millions/s\nsum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}))\n/ 1e6",
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"})",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Per core",
+ "legendFormat": "Request",
"range": true,
"refId": "B"
},
@@ -1834,15 +2868,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) / 1e6",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "Limit",
"range": true,
"refId": "C"
}
],
- "title": "Tokens rate (millions)",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
},
{
@@ -1850,7 +2884,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1863,8 +2897,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1873,6 +2908,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1902,17 +2940,17 @@
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 12,
- "y": 55
+ "x": 0,
+ "y": 2051
},
- "id": 97,
+ "id": 27,
"options": {
"legend": {
"calcs": [ ],
@@ -1927,7 +2965,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1935,15 +2973,14 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Restarts",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "tokens/s by collision type",
+ "title": "Container restarts",
"type": "timeseries"
},
{
@@ -1951,7 +2988,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "The sizes of the blooms created by the compactor. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1964,8 +3001,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1974,6 +3012,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2003,17 +3044,17 @@
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 0,
- "y": 63
+ "x": 12,
+ "y": 2051
},
- "id": 98,
+ "id": 77,
"options": {
"legend": {
"calcs": [ ],
@@ -2028,7 +3069,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2036,49 +3077,36 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(\n 0.90, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "p50",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
- "refId": "F"
+ "refId": "A"
}
],
- "title": "Bloom size",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Builder Resource Usage",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 118,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many chunks are we indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2091,6 +3119,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2123,24 +3152,19 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "none"
+ }
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
- "x": 12,
- "y": 63
+ "x": 0,
+ "y": 2302
},
- "id": 99,
+ "id": 119,
"options": {
"legend": {
"calcs": [ ],
@@ -2155,7 +3179,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2163,32 +3187,68 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (type)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Request",
"range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Chunks indexed",
+ "title": "CPU",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Bloom building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 103,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
@@ -2206,6 +3266,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2238,6 +3299,10 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
}
@@ -2247,10 +3312,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 72
+ "x": 12,
+ "y": 2302
},
- "id": 107,
+ "id": 120,
"options": {
"legend": {
"calcs": [ ],
@@ -2258,29 +3323,55 @@
"placement": "bottom",
"showLegend": true
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Limit",
"range": true,
- "refId": "A"
+ "refId": "C"
}
],
- "title": "Created Blocks",
+ "title": "CPU per pod",
"type": "timeseries"
},
{
@@ -2288,7 +3379,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2301,6 +3391,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2333,19 +3424,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 72
+ "x": 0,
+ "y": 2309
},
- "id": 106,
+ "id": 121,
"options": {
"legend": {
"calcs": [ ],
@@ -2360,6 +3456,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2367,15 +3464,66 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Deleted Blocks",
+ "title": "Memory (workingset)",
"type": "timeseries"
},
{
@@ -2383,7 +3531,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -2396,6 +3543,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2428,19 +3576,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 79
+ "x": 12,
+ "y": 2309
},
- "id": 109,
+ "id": 122,
"options": {
"legend": {
"calcs": [ ],
@@ -2455,6 +3608,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2462,37 +3616,48 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_reused_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"})",
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "{{pod}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Blocks reused",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Blocks building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
- },
- "id": 110,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2505,8 +3670,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2515,6 +3681,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2537,9 +3706,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
@@ -2547,9 +3721,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 87
+ "y": 2316
},
- "id": 108,
+ "id": 123,
"options": {
"legend": {
"calcs": [ ],
@@ -2564,7 +3738,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2572,15 +3746,14 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "Metas",
+ "legendFormat": "Restarts",
"range": true,
"refId": "A"
}
],
- "title": "Created Metas",
+ "title": "Container restarts",
"type": "timeseries"
},
{
@@ -2588,7 +3761,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2601,8 +3774,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2611,6 +3785,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2633,9 +3810,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
@@ -2643,9 +3825,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 87
+ "y": 2316
},
- "id": 105,
+ "id": 124,
"options": {
"legend": {
"calcs": [ ],
@@ -2660,7 +3842,7 @@
}
},
"panels": [ ],
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2668,20 +3850,19 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "Metas",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
"refId": "A"
}
],
- "title": "Deleted Metas",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
}
],
"targets": [ ],
- "title": "Metas building",
+ "title": "Planner Resource Usage",
"type": "row"
},
{
@@ -2690,35 +3871,10 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 28
+ "y": 26
},
- "id": 80,
+ "id": 110,
"panels": [
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 95
- },
- "id": 93,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -2736,6 +3892,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2776,11 +3933,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
+ "w": 9,
"x": 0,
- "y": 96
+ "y": 2497
},
- "id": 83,
+ "id": 108,
"options": {
"legend": {
"calcs": [ ],
@@ -2795,6 +3952,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2802,48 +3960,92 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Metas",
"range": true,
"refId": "A"
+ }
+ ],
+ "title": "Created Metas",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 3,
+ "x": 9,
+ "y": 2497
+ },
+ "id": 140,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[30m]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "table",
"hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
+ "instant": true,
+ "legendFormat": "Metas",
+ "range": false,
+ "refId": "A"
}
],
- "title": "Tenants",
- "type": "timeseries"
+ "title": "Created Metas",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2856,6 +4058,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2898,9 +4101,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 96
+ "y": 2497
},
- "id": 84,
+ "id": 105,
"options": {
"legend": {
"calcs": [ ],
@@ -2915,6 +4118,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2922,42 +4126,32 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_metas_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "Tenants per pod",
+ "title": "Deleted Metas",
"type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 103
- },
- "id": 86,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Number of tenant tables processed. ",
- "transparent": true,
- "type": "text"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Metas building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 103,
+ "panels": [
{
"datasource": {
"type": "prometheus",
@@ -2975,6 +4169,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3015,11 +4210,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
+ "w": 9,
"x": 0,
- "y": 104
+ "y": 2505
},
- "id": 88,
+ "id": 107,
"options": {
"legend": {
"calcs": [ ],
@@ -3034,6 +4229,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3041,41 +4237,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Blocks",
"range": true,
"refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
}
],
- "title": "Tenant Tables",
+ "title": "Created Blocks",
"type": "timeseries"
},
{
@@ -3086,39 +4256,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "mode": "thresholds"
},
"mappings": [ ],
"thresholds": {
@@ -3135,25 +4273,30 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 104
+ "w": 3,
+ "x": 9,
+ "y": 2505
},
- "id": 89,
+ "id": 139,
"options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3161,48 +4304,25 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "time_series",
"hide": false,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "C"
+ "instant": true,
+ "legendFormat": "Blocks",
+ "range": false,
+ "refId": "A"
}
],
- "title": "Tenant Tables per pod",
- "type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 111
- },
- "id": 87,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Series per compaction (includes series copied from other blocks)",
- "transparent": true,
- "type": "text"
+ "title": "Created Blocks",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -3215,6 +4335,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3256,10 +4377,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 112
+ "x": 12,
+ "y": 2505
},
- "id": 81,
+ "id": 106,
"options": {
"legend": {
"calcs": [ ],
@@ -3274,6 +4395,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3281,41 +4403,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_blocks_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p50",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
"refId": "A"
}
],
- "title": "Series",
+ "title": "Deleted Blocks",
"type": "timeseries"
},
{
@@ -3323,6 +4419,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -3335,6 +4432,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3376,10 +4474,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 112
+ "x": 0,
+ "y": 2512
},
- "id": 82,
+ "id": 109,
"options": {
"legend": {
"calcs": [ ],
@@ -3394,6 +4492,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3401,47 +4500,125 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_series_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_series_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_reused_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Blocks",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "avg series per compaction by pod",
+ "title": "Blocks reused",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Blocks building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 135,
+ "panels": [
{
- "description": "",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Is the retention currently running?",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "fieldMinMax": false,
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "yellow",
+ "index": 0,
+ "text": "No"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "Yes"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "max": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [ ]
+ },
"gridPos": {
- "h": 1,
- "w": 24,
+ "h": 7,
+ "w": 3,
"x": 0,
- "y": 119
+ "y": 2573
},
- "id": 90,
+ "id": 136,
"options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "content": "",
- "mode": "markdown"
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "Number of bytes from chunks added to blocks during each compaction.",
- "transparent": true,
- "type": "text"
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Running now?",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Is the retention currently running?",
"fieldConfig": {
"defaults": {
"color": {
@@ -3453,7 +4630,10 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMax": 1,
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3479,7 +4659,9 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
+ "max": 2,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -3489,17 +4671,17 @@
}
]
},
- "unit": "bytes"
+ "unit": "bool_yes_no"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 120
+ "w": 9,
+ "x": 3,
+ "y": 2573
},
- "id": 91,
+ "id": 137,
"options": {
"legend": {
"calcs": [ ],
@@ -3508,12 +4690,12 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3521,41 +4703,13 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
- "title": "Bytes",
+ "title": "Retention running",
"type": "timeseries"
},
{
@@ -3563,6 +4717,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "How much time applying retention took",
"fieldConfig": {
"defaults": {
"color": {
@@ -3575,6 +4730,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3600,6 +4756,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
"thresholds": {
"mode": "absolute",
@@ -3610,7 +4767,7 @@
}
]
},
- "unit": "bytes"
+ "unit": "dtdurations"
},
"overrides": [ ]
},
@@ -3618,9 +4775,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 120
+ "y": 2573
},
- "id": 92,
+ "id": 138,
"options": {
"legend": {
"calcs": [ ],
@@ -3629,12 +4786,12 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3642,20 +4799,18 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_bytes_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_bytes_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
+ "expr": "histogram_quantile(0.9, \n sum by (status, le) (\n rate(loki_bloomplanner_retention_time_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval])\n )\n)",
+ "legendFormat": "__auto",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "avg bytes per compaction by pod",
+ "title": "Retention time",
"type": "timeseries"
}
],
"targets": [ ],
- "title": "Data processed",
+ "title": "Retention",
"type": "row"
},
{
@@ -3666,7 +4821,7 @@
"x": 0,
"y": 29
},
- "id": 58,
+ "id": 62,
"panels": [
{
"description": "",
@@ -3675,23 +4830,52 @@
"overrides": [ ]
},
"gridPos": {
- "h": 3,
+ "h": 4,
"w": 24,
"x": 0,
- "y": 82
+ "y": 2581
+ },
+ "id": 71,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "During the planning phase, the planner downloads the metas and TSDBs to build the plan.\n\nOnce all blocks and metas are built, the builder flushes them to the object store.\n\nAfter each iteration, the planner deletes the metas and blocks marked for deletion in the tombstones.",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 2,
+ "x": 0,
+ "y": 2585
},
- "id": 47,
+ "id": 63,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "\nCompactors write blocks to the attached PVs before flushing them into the object store.\nIt also download chunks and index files.\n\nAfter compacting a given tenant, all the downloaded index files and chunks, as well as the already flushed blocks are deleted.",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "",
"transparent": true,
@@ -3702,6 +4886,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3714,8 +4899,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3736,35 +4922,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [ ],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.80000000000000004
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2585
},
- "id": 9,
+ "id": 61,
"options": {
"legend": {
"calcs": [ ],
@@ -3779,6 +4960,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3786,40 +4968,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Utilization",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -3827,6 +4984,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3839,8 +4997,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3861,35 +5020,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [ ],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.80000000000000004
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2585
},
- "id": 100,
+ "id": 64,
"options": {
"legend": {
"calcs": [ ],
@@ -3904,6 +5058,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3911,14 +5066,41 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"})",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Utilization per pod",
+ "title": "Latency Planner",
"type": "timeseries"
},
{
@@ -3926,6 +5108,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3938,8 +5121,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3968,25 +5152,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 93
+ "w": 11,
+ "x": 2,
+ "y": 2592
},
- "id": 7,
+ "id": 127,
"options": {
"legend": {
"calcs": [ ],
@@ -4001,6 +5182,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4008,40 +5190,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Writes",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4049,6 +5206,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4061,8 +5219,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4091,25 +5250,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 93
+ "w": 11,
+ "x": 13,
+ "y": 2592
},
- "id": 101,
+ "id": 128,
"options": {
"legend": {
"calcs": [ ],
@@ -4124,6 +5280,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4131,21 +5288,78 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p99",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p50",
"range": true,
- "refId": "A"
+ "refId": "F"
}
],
- "title": "Disk Writes per pod",
+ "title": "Latency Planner",
"type": "timeseries"
},
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 2,
+ "x": 0,
+ "y": 2598
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### S3\n",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4158,8 +5372,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4188,25 +5403,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2599
},
- "id": 8,
+ "id": 67,
"options": {
"legend": {
"calcs": [ ],
@@ -4221,6 +5433,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4228,40 +5441,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Reads",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4269,6 +5457,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4281,8 +5470,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4311,25 +5501,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2599
},
- "id": 102,
+ "id": 69,
"options": {
"legend": {
"calcs": [ ],
@@ -4344,6 +5531,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4351,88 +5539,42 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Reads per pod",
+ "title": "Latency Planner",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Disk Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 62,
- "panels": [
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 3,
- "w": 24,
- "x": 0,
- "y": 83
- },
- "id": 71,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "Once all blocks and metas are built locally, the compactor flushes them to the object store.\n\nAfter each iteration, the compactor deletes the metas and blocks marked for deletion in the tombstones.",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
- },
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 86
- },
- "id": 63,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
},
{
"datasource": {
@@ -4452,6 +5594,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4482,7 +5625,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4494,9 +5638,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 86
+ "y": 2606
},
- "id": 61,
+ "id": 129,
"options": {
"legend": {
"calcs": [ ],
@@ -4511,6 +5655,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4518,7 +5663,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4526,7 +5671,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4547,6 +5692,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4577,7 +5723,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4589,9 +5736,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 86
+ "y": 2606
},
- "id": 64,
+ "id": 130,
"options": {
"legend": {
"calcs": [ ],
@@ -4606,6 +5753,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4613,7 +5761,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4626,7 +5774,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4639,7 +5787,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4647,7 +5795,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
},
{
@@ -4660,20 +5808,20 @@
"h": 7,
"w": 2,
"x": 0,
- "y": 93
+ "y": 2612
},
- "id": 65,
+ "id": 66,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### Azure\nBlob Storage",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "",
"transparent": true,
@@ -4697,6 +5845,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4727,7 +5876,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4739,9 +5889,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 93
+ "y": 2613
},
- "id": 67,
+ "id": 68,
"options": {
"legend": {
"calcs": [ ],
@@ -4756,6 +5906,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4763,7 +5914,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4771,7 +5922,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4792,6 +5943,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4822,7 +5974,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4834,9 +5987,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 93
+ "y": 2613
},
- "id": 69,
+ "id": 70,
"options": {
"legend": {
"calcs": [ ],
@@ -4851,6 +6004,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4858,7 +6012,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4871,7 +6025,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4884,7 +6038,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4892,38 +6046,9 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Planner",
"type": "timeseries"
},
- {
- "description": "",
- "fieldConfig": {
- "defaults": { },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 100
- },
- "id": 66,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### Azure\nBlob Storage",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-69747",
- "targets": [ ],
- "title": "",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -4942,6 +6067,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4972,7 +6098,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4984,9 +6111,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 100
+ "y": 2620
},
- "id": 68,
+ "id": 131,
"options": {
"legend": {
"calcs": [ ],
@@ -5001,6 +6128,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -5008,7 +6136,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -5016,7 +6144,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -5037,6 +6165,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -5067,7 +6196,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -5079,9 +6209,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 100
+ "y": 2620
},
- "id": 70,
+ "id": 132,
"options": {
"legend": {
"calcs": [ ],
@@ -5096,6 +6226,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -5103,7 +6234,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -5116,7 +6247,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -5129,7 +6260,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -5137,7 +6268,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
}
],
@@ -5146,6 +6277,7 @@
"type": "row"
}
],
+ "preload": false,
"refresh": "10s",
"rows": [ ],
"schemaVersion": 14,
@@ -5224,6 +6356,26 @@
"refresh": 1,
"regex": "",
"type": "datasource"
+ },
+ {
+ "allValue": ".+",
+ "current": { },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Tenant",
+ "multi": false,
+ "name": "tenant",
+ "options": [ ],
+ "query": "label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", namespace=\"$namespace\"}, tenant)",
+ "refresh": 0,
+ "regex": "",
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tags": [ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
}
]
},
@@ -5231,7 +6383,6 @@
"from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {
"refresh_intervals": [
"5s",
@@ -5258,8 +6409,8 @@
]
},
"timezone": "utc",
- "title": "Loki / Bloom Compactor",
- "uid": "bloom-compactor",
+ "title": "Loki / Bloom Build",
+ "uid": "bloom-build",
"version": 0,
"weekStart": ""
}
\ No newline at end of file
diff --git a/production/loki-mixin-compiled/dashboards/loki-bloom-gateway.json b/production/loki-mixin-compiled/dashboards/loki-bloom-gateway.json
index 2d5e16a9d7e0f..0deb5e33b5d7e 100644
--- a/production/loki-mixin-compiled/dashboards/loki-bloom-gateway.json
+++ b/production/loki-mixin-compiled/dashboards/loki-bloom-gateway.json
@@ -21,7 +21,6 @@
"type": "dashboards"
}
],
- "liveNow": false,
"panels": [
{
"collapsed": false,
@@ -46,8 +45,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "thresholds",
- "seriesBy": "last"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -56,9 +54,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
- "gradientMode": "none",
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -69,7 +68,7 @@
"lineStyle": {
"fill": "solid"
},
- "lineWidth": 2,
+ "lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
@@ -114,7 +113,7 @@
},
"gridPos": {
"h": 6,
- "w": 6,
+ "w": 12,
"x": 0,
"y": 1
},
@@ -133,6 +132,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -140,7 +140,8 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
"legendFormat": "Chunks",
"range": true,
@@ -153,7 +154,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "Series",
@@ -161,7 +162,7 @@
"refId": "B"
}
],
- "title": "Filter ratio",
+ "title": "Filter ratio - Bloom Gateway (server)",
"type": "timeseries"
},
{
@@ -206,7 +207,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 6,
+ "x": 12,
"y": 1
},
"id": 75,
@@ -226,7 +227,7 @@
"sizing": "auto"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -235,7 +236,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"instant": true,
"legendFormat": "Chunks",
"range": false,
@@ -248,40 +249,12 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"hide": false,
"instant": true,
"legendFormat": "Series",
"range": false,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Chunks avg",
- "range": false,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Series avg",
- "range": false,
- "refId": "D"
}
],
"title": "Filter ratio",
@@ -305,6 +278,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -381,7 +355,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 12,
+ "x": 18,
"y": 1
},
"id": 72,
@@ -399,6 +373,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -413,19 +388,6 @@
"range": true,
"refId": "D"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(kube_pod_container_status_ready{container=\"bloom-gateway\", cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Running",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -438,19 +400,6 @@
"legendFormat": "Desired",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "kube_statefulset_status_replicas_available{cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"bloom-gateway\"}",
- "hide": true,
- "instant": false,
- "legendFormat": "Available",
- "range": true,
- "refId": "C"
}
],
"title": "Readiness",
@@ -461,11 +410,11 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Percentage of chunks that are filtered by using bloom filters",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -474,9 +423,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 50,
- "gradientMode": "none",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -499,34 +449,44 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "off"
+ "mode": "area"
}
},
"mappings": [ ],
+ "max": 1,
+ "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "red",
"value": null
},
{
- "color": "red",
- "value": 80
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
}
]
},
- "unit": "none"
+ "unit": "percentunit"
},
"overrides": [ ]
},
"gridPos": {
"h": 6,
- "w": 6,
- "x": 18,
- "y": 1
+ "w": 12,
+ "x": 0,
+ "y": 7
},
- "id": 37,
+ "id": 93,
"options": {
"legend": {
"calcs": [ ],
@@ -541,6 +501,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -548,77 +509,135 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "Chunks",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "interval": "",
- "legendFormat": "{{pod}} ({{reason}})",
+ "legendFormat": "Series",
"range": true,
- "refId": "C"
+ "refId": "B"
}
],
- "title": "Container restarts",
+ "title": "Filter ratio - Index Gateway (client)",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
},
"gridPos": {
- "h": 9,
- "w": 15,
- "x": 0,
+ "h": 6,
+ "w": 6,
+ "x": 12,
"y": 7
},
- "id": 48,
+ "id": 94,
"options": {
- "dedupStrategy": "none",
- "enableLogDetails": true,
- "prettifyLogMessage": false,
- "showCommonLabels": false,
- "showLabels": false,
- "showTime": false,
- "sortOrder": "Descending",
- "wrapLogMessage": true
+ "minVizHeight": 75,
+ "minVizWidth": 75,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true,
+ "sizing": "auto"
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "instant": true,
+ "legendFormat": "Chunks",
+ "range": false,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
- "hide": true,
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "hide": false,
+ "instant": true,
+ "legendFormat": "Series",
+ "range": false,
"refId": "B"
}
],
- "title": "Errors",
- "type": "logs"
+ "title": "Filter ratio",
+ "type": "gauge"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
- "fixedColor": "red",
- "mode": "fixed"
+ "mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
@@ -627,8 +646,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "bars",
- "fillOpacity": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -637,11 +657,13 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "symlog"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -663,66 +685,21 @@
},
{
"color": "red",
- "value": 1
- }
- ]
- }
- },
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "warn"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "orange",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "error"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "red",
- "mode": "fixed"
- }
+ "value": 80
}
]
},
- {
- "matcher": {
- "id": "byName",
- "options": "panic"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "semi-dark-red",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
+ "unit": "none"
+ },
+ "overrides": [ ]
},
"gridPos": {
- "h": 9,
- "w": 9,
- "x": 15,
+ "h": 6,
+ "w": 6,
+ "x": 18,
"y": 7
},
- "id": 52,
+ "id": 37,
"options": {
"legend": {
"calcs": [ ],
@@ -737,42 +714,935 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
- "legendFormat": "{{ level }}",
- "queryType": "range",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
"hide": false,
- "legendFormat": "panic",
- "queryType": "range",
- "refId": "B"
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}} ({{reason}})",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Errors Rate",
+ "title": "Container restarts",
"type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 16
- },
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.90000000000000002
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "id": 99,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "(\n sum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n -\n sum(rate(loki_index_gateway_postfilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n)\n/\nsum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)",
+ "instant": false,
+ "legendFormat": "chunks {{ route}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Filter ratio - Index Gateway by route",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "#EAB839",
+ "value": 0.10000000000000001
+ },
+ {
+ "color": "#EF843C",
+ "value": 0.25
+ },
+ {
+ "color": "red",
+ "value": 0.5
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "id": 100,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "series",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "chunks",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Data skipped because they don't match any blocks",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 96,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 447
+ },
+ "id": 97,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Found/Skipped/Missing chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 447
+ },
+ "id": 98,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 454
+ },
+ "id": 107,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Found/Skipped/Missing series",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 454
+ },
+ "id": 108,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered series",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [ ],
+ "title": "Bloom Recorder",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 95,
+ "panels": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 15,
+ "x": 0,
+ "y": 920
+ },
+ "id": 48,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "symlog"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "warn"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "panic"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "semi-dark-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 9,
+ "x": 15,
+ "y": 920
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors Rate",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [ ],
+ "title": "Logs",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
"id": 56,
"panels": [
{
@@ -792,6 +1662,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -822,8 +1693,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -838,7 +1708,7 @@
"h": 14,
"w": 12,
"x": 0,
- "y": 17
+ "y": 1764
},
"id": 10,
"options": {
@@ -855,6 +1725,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -915,6 +1786,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -945,8 +1817,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -962,7 +1833,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 17
+ "y": 1764
},
"id": 11,
"options": {
@@ -979,6 +1850,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1040,6 +1912,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1070,8 +1943,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1087,7 +1959,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 24
+ "y": 2140
},
"id": 81,
"options": {
@@ -1104,6 +1976,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1179,6 +2052,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1212,8 +2086,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1228,7 +2101,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 31
+ "y": 2147
},
"id": 87,
"options": {
@@ -1245,6 +2118,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1281,6 +2155,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1314,8 +2189,110 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 2147
+ },
+ "id": 88,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GC duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
{
"color": "red",
@@ -1330,10 +2307,10 @@
"gridPos": {
"h": 7,
"w": 8,
- "x": 8,
- "y": 31
+ "x": 16,
+ "y": 2147
},
- "id": 88,
+ "id": 89,
"options": {
"legend": {
"calcs": [ ],
@@ -1348,6 +2325,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1355,14 +2333,26 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
}
],
- "title": "GC duration",
+ "title": "GC pauses",
"type": "timeseries"
},
{
@@ -1383,6 +2373,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1393,9 +2384,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1416,8 +2404,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1425,17 +2412,17 @@
}
]
},
- "unit": "s"
+ "unit": "binBps"
},
"overrides": [ ]
},
"gridPos": {
"h": 7,
- "w": 8,
- "x": 16,
- "y": 31
+ "w": 12,
+ "x": 0,
+ "y": 2154
},
- "id": 89,
+ "id": 84,
"options": {
"legend": {
"calcs": [ ],
@@ -1450,6 +2437,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1457,30 +2445,21 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
- "legendFormat": "__auto",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}}",
"range": true,
- "refId": "B"
+ "refId": "D"
}
],
- "title": "GC pauses",
+ "title": "Disk reads",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1497,6 +2476,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1527,8 +2507,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1543,10 +2522,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 38
+ "x": 12,
+ "y": 2154
},
- "id": 84,
+ "id": 85,
"options": {
"legend": {
"calcs": [ ],
@@ -1561,6 +2540,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1568,20 +2548,20 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
"instant": false,
- "interval": "",
"legendFormat": "{{pod}}",
"range": true,
"refId": "D"
}
],
- "title": "Disk reads",
+ "title": "Disk writes",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1598,6 +2578,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1617,7 +2598,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -1628,8 +2609,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1643,11 +2623,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 38
+ "w": 24,
+ "x": 0,
+ "y": 2161
},
- "id": 85,
+ "id": 102,
"options": {
"legend": {
"calcs": [ ],
@@ -1662,6 +2642,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1669,15 +2650,29 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum(sum by (instance) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by (instance) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "interval": "",
+ "legendFormat": "Reads",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(sum by(instance) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by(instance) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)) * -1",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Writes",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Disk writes",
+ "title": "Disk reads/writes",
"type": "timeseries"
}
],
@@ -1691,7 +2686,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 17
+ "y": 22
},
"id": 2,
"panels": [
@@ -1712,6 +2707,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -1742,8 +2738,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1799,7 +2794,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 18
+ "y": 1175
},
"id": 13,
"options": {
@@ -1816,6 +2811,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1823,7 +2819,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
+ "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\",job=\"$namespace/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -1851,6 +2847,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
@@ -1881,8 +2878,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1938,7 +2934,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 18
+ "y": 1175
},
"id": 86,
"options": {
@@ -1955,6 +2951,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1975,6 +2972,7 @@
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1991,6 +2989,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2021,8 +3020,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2034,7 +3032,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 25
+ "y": 1249
},
"id": 14,
"options": {
@@ -2051,6 +3049,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2090,6 +3089,19 @@
"legendFormat": "{{ route }} 99th percentile",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"/logproto.BloomGateway/FilterChunkRefs\"}))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{ route }} max",
+ "range": true,
+ "refId": "A"
}
],
"title": "Latency",
@@ -2112,6 +3124,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2142,8 +3155,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2155,7 +3167,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 25
+ "y": 1249
},
"id": 15,
"options": {
@@ -2172,6 +3184,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2201,7 +3214,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 18
+ "y": 23
},
"id": 58,
"panels": [
@@ -2223,6 +3236,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -2253,7 +3267,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2265,7 +3280,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 11
+ "y": 1176
},
"id": 16,
"options": {
@@ -2282,33 +3297,8 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (loki_bloom_gateway_queue_duration_seconds_sum{cluster=\"$cluster\", namespace=\"$namespace\"})\n/\nsum by (pod) (loki_bloom_gateway_queue_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"})\n",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum (loki_bloom_gateway_queue_length{cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Total",
- "range": true,
- "refId": "D"
- },
{
"datasource": {
"type": "prometheus",
@@ -2344,6 +3334,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2374,7 +3365,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2386,7 +3378,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 11
+ "y": 1176
},
"id": 17,
"options": {
@@ -2403,6 +3395,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2417,19 +3410,6 @@
"range": true,
"refId": "E"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (le) (rate(loki_bloom_gateway_queue_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))",
- "hide": true,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -2478,6 +3458,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2508,7 +3489,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2520,7 +3502,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 11
+ "y": 1176
},
"id": 22,
"options": {
@@ -2537,6 +3519,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2566,7 +3549,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 19
+ "y": 24
},
"id": 68,
"panels": [
@@ -2588,6 +3571,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2634,7 +3618,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 12
+ "y": 1177
},
"id": 69,
"options": {
@@ -2651,6 +3635,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2712,6 +3697,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2758,7 +3744,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 12
+ "y": 1177
},
"id": 70,
"options": {
@@ -2775,6 +3761,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2836,6 +3823,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2881,7 +3869,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 12
+ "y": 1177
},
"id": 71,
"options": {
@@ -2898,6 +3886,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2917,15 +3906,115 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "processed {{status}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Tasks dequeued/processed",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 1214
+ },
+ "id": 105,
+ "options": {
+ "legend": {
+ "calcs": [ ],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_gateway_process_duration_seconds_count{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval])) by (status)",
"instant": false,
- "legendFormat": "processed {{status}}",
+ "legendFormat": "{{status}}",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "Tasks dequeued/processed",
+ "title": "Worker Iterations per second",
"type": "timeseries"
}
],
@@ -2939,21 +4028,21 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 20
+ "y": 25
},
"id": 59,
"panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 13
+ "y": 1178
},
"id": 19,
"options": {
@@ -2966,7 +4055,7 @@
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77663",
"targets": [ ],
"title": "We cache bloom blocks in memory to prevent the gateway from hitting the object store too often",
"transparent": true,
@@ -2990,6 +4079,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3036,7 +4126,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 14
+ "y": 1179
},
"id": 20,
"options": {
@@ -3053,6 +4143,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3102,6 +4193,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3194,7 +4286,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 14
+ "y": 1179
},
"id": 83,
"options": {
@@ -3211,6 +4303,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3286,6 +4379,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -3364,7 +4458,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 21
+ "y": 1186
},
"id": 92,
"options": {
@@ -3381,6 +4475,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3388,7 +4483,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)",
+ "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -3417,6 +4512,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3476,7 +4572,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 28
+ "y": 1193
},
"id": 76,
"options": {
@@ -3493,6 +4589,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3541,6 +4638,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3599,255 +4697,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 28
- },
- "id": 21,
- "options": {
- "legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "panels": [ ],
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "metas fetch rate",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "blocks fetch rate",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 blocks size",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 metas size",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "G"
- }
- ],
- "title": "Bloom Store",
- "type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Blocks Cache",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 21
- },
- "id": 60,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 14
- },
- "id": 61,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "title": "The gateway download bloom meta files and blocks from the object store.",
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 15
- },
- "id": 24,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "panels": [ ],
- "pluginVersion": "11.1.0-70005",
- "targets": [ ],
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 25,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- }
- ]
- },
- "unit": "none"
- },
- "overrides": [ ]
- },
- "gridPos": {
- "h": 7,
- "w": 11,
- "x": 2,
- "y": 15
+ "x": 12,
+ "y": 1193
},
- "id": 25,
+ "id": 21,
"options": {
"legend": {
"calcs": [ ],
@@ -3862,6 +4715,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3869,14 +4723,93 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{operation}} {{status_code}}",
+ "legendFormat": "metas fetch rate",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "blocks fetch rate",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 blocks size",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 metas size",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "G"
}
],
- "title": "QPS",
+ "title": "Bloom Store",
"type": "timeseries"
},
{
@@ -3897,6 +4830,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3907,6 +4841,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -3922,6 +4859,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [ ],
"thresholds": {
"mode": "absolute",
@@ -3931,17 +4869,30 @@
}
]
},
- "unit": "none"
+ "unit": "short"
},
- "overrides": [ ]
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/Size (.*)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
- "w": 11,
- "x": 13,
- "y": 15
+ "w": 12,
+ "x": 0,
+ "y": 1200
},
- "id": 29,
+ "id": 101,
"options": {
"legend": {
"calcs": [ ],
@@ -3956,6 +4907,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3963,67 +4915,88 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{operation}} p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "sum by (job)(rate(loki_bloom_store_download_queue_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{operation}} p90",
+ "interval": "",
+ "legendFormat": "Size",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "hide": false,
- "instant": false,
- "legendFormat": "{{operation}} p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Latency",
+ "title": "Block download queue size",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Blocks Cache",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 60,
+ "panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 1013
+ },
+ "id": 61,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "",
+ "mode": "markdown"
},
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [ ],
+ "title": "The gateway downloads bloom meta files and blocks from the object store.",
+ "transparent": true,
+ "type": "text"
+ },
+ {
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 22
+ "y": 1014
},
- "id": 62,
+ "id": 24,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [ ],
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -4045,6 +5018,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4075,7 +5049,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4087,9 +5062,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 22
+ "y": 1014
},
- "id": 63,
+ "id": 25,
"options": {
"legend": {
"calcs": [ ],
@@ -4104,6 +5079,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4111,7 +5087,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4139,8 +5115,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 25,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4158,7 +5135,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -4169,7 +5146,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4181,9 +5159,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 22
+ "y": 1014
},
- "id": 64,
+ "id": 29,
"options": {
"legend": {
"calcs": [ ],
@@ -4198,6 +5176,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4205,7 +5184,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4217,7 +5196,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4230,7 +5209,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4242,30 +5221,31 @@
"type": "timeseries"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 29
+ "y": 1021
},
- "id": 65,
+ "id": 62,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### Azure\nBlob Storage\n\n",
+ "content": "---\n#### S3\n",
"mode": "markdown"
},
"panels": [ ],
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [ ],
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -4287,6 +5267,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4317,7 +5298,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4329,9 +5311,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 29
+ "y": 1021
},
- "id": 66,
+ "id": 63,
"options": {
"legend": {
"calcs": [ ],
@@ -4346,6 +5328,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4353,7 +5336,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4381,6 +5364,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4411,7 +5395,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4423,9 +5408,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 29
+ "y": 1021
},
- "id": 67,
+ "id": 64,
"options": {
"legend": {
"calcs": [ ],
@@ -4440,6 +5425,7 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4447,7 +5433,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4459,7 +5445,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4472,7 +5458,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4482,26 +5468,40 @@
],
"title": "Latency",
"type": "timeseries"
- }
- ],
- "targets": [ ],
- "title": "Object Store",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 22
- },
- "id": 77,
- "panels": [
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": { },
+ "overrides": [ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 2,
+ "x": 0,
+ "y": 1028
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### Azure\nBlob Storage\n\n",
+ "mode": "markdown"
+ },
+ "panels": [ ],
+ "pluginVersion": "11.4.0-77765",
+ "targets": [ ],
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4516,8 +5516,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4526,9 +5527,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4538,7 +5536,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4549,24 +5547,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
"overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 1028
},
- "id": 78,
+ "id": 66,
"options": {
"legend": {
"calcs": [ ],
@@ -4581,26 +5577,28 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "topk(3, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [5s])))",
- "legendFormat": "{{tasks}}",
- "queryType": "range",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{operation}} {{status_code}}",
+ "range": true,
"refId": "A"
}
],
- "title": "Process tasks with bounds",
+ "title": "QPS",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4615,8 +5613,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.59999999999999998,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4625,9 +5624,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4637,7 +5633,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4648,50 +5644,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "max",
- "avg"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": [ ]
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 1028
},
- "id": 79,
+ "id": 67,
"options": {
"legend": {
"calcs": [ ],
@@ -4706,91 +5674,81 @@
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max(max_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
- "hide": false,
- "legendFormat": "max",
- "queryType": "range",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{operation}} p99",
+ "range": true,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
"refId": "B"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"request unavailable blocks in the background\" | logfmt | missing > 0 | unwrap missing [$__auto]))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg missing",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
"refId": "C"
}
],
- "title": "Download enqueue duration",
+ "title": "Latency",
"type": "timeseries"
- },
+ }
+ ],
+ "targets": [ ],
+ "title": "Object Store",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 77,
+ "panels": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
- "color": {
- "fixedColor": "green",
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "fillOpacity": 80,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "lineWidth": 1,
"scaleDistribution": {
"type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 100
- }
- ]
}
},
"overrides": [ ]
@@ -4799,145 +5757,127 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 31
+ "y": 1044
},
"id": 80,
"options": {
- "barRadius": 0,
- "barWidth": 0.96999999999999997,
- "fullHighlight": false,
- "groupWidth": 0.69999999999999996,
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1.0000000000000001e-09
+ },
"legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
- "orientation": "horizontal",
- "showValue": "auto",
- "stacking": "none",
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
},
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
+ }
},
"panels": [ ],
- "pluginVersion": "11.0.0-67814",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sort_desc(topk(10, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [$__auto]))))",
- "legendFormat": "",
- "queryType": "instant",
+ "exemplar": false,
+ "expr": "increase(loki_bloom_gateway_dequeue_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
"refId": "A"
}
],
- "title": "Tasks multiplexed",
- "type": "barchart"
+ "title": "Dequeue duration",
+ "type": "heatmap"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
- "color": {
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
- "lineWidth": 1,
- "pointSize": 5,
"scaleDistribution": {
"type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [ ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
- }
- ]
}
},
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "Enqueue latency"
- },
- "properties": [
- {
- "id": "unit",
- "value": "s"
- }
- ]
- }
- ]
+ "overrides": [ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 31
+ "y": 1044
},
- "id": 82,
+ "id": 106,
"options": {
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1.0000000000000001e-09
+ },
"legend": {
- "calcs": [ ],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
}
},
"panels": [ ],
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4945,27 +5885,17 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_enqueue_time_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "Enqueue latency",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "increase(loki_bloom_gateway_tasks_dequeued_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
"hide": false,
- "legendFormat": "Size",
+ "instant": false,
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "Block download queue",
- "type": "timeseries"
+ "title": "Dequeue count",
+ "type": "heatmap"
}
],
"targets": [ ],
@@ -4973,6 +5903,7 @@
"type": "row"
}
],
+ "preload": false,
"refresh": "10s",
"rows": [ ],
"schemaVersion": 14,
@@ -5058,7 +5989,6 @@
"from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {
"refresh_intervals": [
"5s",
diff --git a/production/loki-mixin-compiled/dashboards/loki-retention.json b/production/loki-mixin-compiled/dashboards/loki-retention.json
index 70c5171d9c391..ac719445d2458 100644
--- a/production/loki-mixin-compiled/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled/dashboards/loki-retention.json
@@ -495,7 +495,7 @@
"span": 6,
"targets": [
{
- "expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
+ "expr": "sum(loki_compactor_locked_table_successive_compaction_skips{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"legendFormat": "{{table_name}}",
"legendLink": null
diff --git a/production/loki-mixin/.lint b/production/loki-mixin/.lint
index 4661415c1c7f2..d8c2ddc8b956e 100644
--- a/production/loki-mixin/.lint
+++ b/production/loki-mixin/.lint
@@ -12,7 +12,7 @@ exclusions:
- dashboard: "Loki / Logs"
- dashboard: "Loki / Writes Resources"
- dashboard: "Loki / Writes"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
- dashboard: "Loki / Bloom Gateway"
template-datasource-rule:
reason: "Based on new convention we are using variable names prometheus_datasource and loki_datasource where as linter expects 'datasource'"
@@ -27,7 +27,7 @@ exclusions:
- dashboard: "Loki / Logs"
- dashboard: "Loki / Writes Resources"
- dashboard: "Loki / Writes"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
- dashboard: "Loki / Bloom Gateway"
template-instance-rule:
reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes"
@@ -42,7 +42,7 @@ exclusions:
- dashboard: "Loki / Reads Resources"
- dashboard: "Loki / Logs"
- dashboard: "Loki / Writes Resources"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
- dashboard: "Loki / Bloom Gateway"
target-instance-rule:
reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes"
@@ -57,7 +57,7 @@ exclusions:
- dashboard: "Loki / Logs"
- dashboard: "Loki / Writes Resources"
- dashboard: "Loki / Writes"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
- dashboard: "Loki / Bloom Gateway"
target-job-rule:
reason: "We don't have/need a job template selector for this dashboard"
@@ -72,7 +72,7 @@ exclusions:
- dashboard: "Loki / Logs"
- dashboard: "Loki / Writes Resources"
- dashboard: "Loki / Writes"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
- dashboard: "Loki / Bloom Gateway"
target-promql-rule:
reason: "The following are logql queries, not promql"
@@ -89,8 +89,10 @@ exclusions:
panel: "Process tasks with bounds"
- dashboard: "Loki / Bloom Gateway"
panel: "Download enqueue duration"
- - dashboard: "Loki / Bloom Compactor"
- panel: "Errors Rate"
+ - dashboard: "Loki / Bloom Build"
+ panel: "Errors Rate Planner"
+ - dashboard: "Loki / Bloom Build"
+ panel: "Errors Rate Builder"
panel-datasource-rule:
reason: "Loki datasource variable is being named as loki_datasource now while linter expects 'datasource'"
entries:
@@ -183,37 +185,39 @@ exclusions:
panel: "Query Pages"
- dashboard: "Loki / Operational"
panel: "Throttled Rate"
- - dashboard: "Loki / Bloom Compactor"
- panel: "Errors Rate"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
+ panel: "Errors Rate Planner"
+ - dashboard: "Loki / Bloom Build"
+ panel: "Errors Rate Builder"
+ - dashboard: "Loki / Bloom Build"
panel: "Required CPUs to not lag behind"
- - dashboard: "Loki / Bloom Compactor"
- panel: "Tokens rate (millions)"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
+ panel: "Tokens rate"
+ - dashboard: "Loki / Bloom Build"
panel: "Created Blocks"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Deleted Blocks"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Blocks reused"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Created Metas"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Deleted Metas"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Tenants"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Tenants per pod"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Tenant Tables"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Tenant Tables per pod"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "Series"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "avg series per compaction by pod"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "CPU"
- - dashboard: "Loki / Bloom Compactor"
+ - dashboard: "Loki / Bloom Build"
panel: "CPU per pod"
- dashboard: "Loki / Bloom Gateway"
panel: "Errors Rate"
@@ -236,4 +240,4 @@ exclusions:
- dashboard: "Loki / Bloom Gateway"
panel: "CPU"
- dashboard: "Loki / Bloom Gateway"
- panel: "CPU per pod"
\ No newline at end of file
+ panel: "CPU per pod"
diff --git a/production/loki-mixin/dashboards.libsonnet b/production/loki-mixin/dashboards.libsonnet
index a28f276cd66da..cb1b5d5161778 100644
--- a/production/loki-mixin/dashboards.libsonnet
+++ b/production/loki-mixin/dashboards.libsonnet
@@ -10,5 +10,5 @@
(import 'dashboards/loki-deletion.libsonnet') +
(import 'dashboards/loki-canary-dashboard.libsonnet') +
(import 'dashboards/recording-rules.libsonnet') +
-(import 'dashboards/loki-bloom-compactor.libsonnet') +
+(import 'dashboards/loki-bloom-build.libsonnet') +
(import 'dashboards/loki-bloom-gateway.libsonnet')
diff --git a/production/loki-mixin/dashboards/dashboard-bloom-compactor.json b/production/loki-mixin/dashboards/dashboard-bloom-build.json
similarity index 69%
rename from production/loki-mixin/dashboards/dashboard-bloom-compactor.json
rename to production/loki-mixin/dashboards/dashboard-bloom-build.json
index 035a7523af8e3..7379b46ae2d6b 100644
--- a/production/loki-mixin/dashboards/dashboard-bloom-compactor.json
+++ b/production/loki-mixin/dashboards/dashboard-bloom-build.json
@@ -33,26 +33,6 @@
"title": "Overview",
"type": "row"
},
- {
- "gridPos": {
- "h": 8,
- "w": 14,
- "x": 0,
- "y": 1
- },
- "id": 35,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "## About the Bloom Compactor\nThe compactor iterates through chunks and creates blooms out of them.\nThe size of the resulting blooms depends on the bloom filter settings, the tokenizer settings, the number of ring tokens per compactor and the total number opf compactors.\n\nCompactors are horizontally scalable and uses a ring to:\n- Shard tenants\n- Shard series fingerprints within a tenant subring.\n\nThe blooms for the series are grouped together in blocks which are flushed to object store.", "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -71,6 +51,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 15,
"gradientMode": "none",
@@ -115,13 +96,30 @@
},
"unit": "percentunit"
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "none"
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 9
+ "y": 1
},
"id": 42,
"options": {
@@ -137,7 +135,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -145,41 +143,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"})\n/\nsum(count(loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}))",
- "hide": false,
- "instant": false,
- "legendFormat": "avg",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.9, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.1, \n sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n )\n)",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum(loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "p10",
+ "legendFormat": "Progress",
"range": true,
- "refId": "C"
+ "refId": "D"
}
],
- "title": "Progress",
+ "title": "Overall progress",
"type": "timeseries"
},
{
@@ -187,7 +159,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Uncompressed size of chunks in a series VS the size of the blooms built.",
+ "description": "Cell-wide compaction progress. Should increase till completion throughout each compaction period.",
"fieldConfig": {
"defaults": {
"color": {
@@ -200,8 +172,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -235,33 +208,29 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
},
- "unit": "bytes"
+ "unit": "percentunit"
},
"overrides": [
{
"matcher": {
- "id": "byName",
- "options": "Ratio"
+ "id": "byRegexp",
+ "options": "/(Planned|success|failure)/"
},
"properties": [
{
"id": "unit",
- "value": "percentunit"
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "Ratio over range"
- },
- "properties": [
+ "value": "none"
+ },
{
- "id": "unit",
- "value": "percentunit"
+ "id": "custom.fillOpacity",
+ "value": 0
}
]
}
@@ -271,9 +240,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 9
+ "y": 1
},
- "id": 41,
+ "id": 116,
"options": {
"legend": {
"calcs": [],
@@ -287,7 +256,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -295,54 +264,156 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
+ "expr": "sum by (tenant) (loki_bloomplanner_tenant_tasks_completed{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})\n/\nsum by (tenant) (loki_bloomplanner_tenant_tasks_planned{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"})",
"hide": false,
"instant": false,
- "legendFormat": "Bloom",
+ "legendFormat": "{{tenant}}",
"range": true,
- "refId": "A"
- },
+ "refId": "D"
+ }
+ ],
+ "title": "Progress by tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "description": "Error-level logs from the bloom-planner component.",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 17,
+ "x": 0,
+ "y": 8
+ },
+ "id": 51,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Chunk",
- "range": true,
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\"",
+ "queryType": "range",
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ }
+ ],
+ "title": "Errors Planner",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
},
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio",
- "range": true,
- "refId": "C"
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 3,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 7,
+ "x": 17,
+ "y": 8
+ },
+ "id": 53,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "loki",
+ "uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n/\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "Ratio over range",
- "range": true,
- "refId": "D"
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} |= \"level=error\" |= \"component=bloom-planner\" [$__auto]))",
+ "legendFormat": "Error rate",
+ "queryType": "range",
+ "refId": "A"
}
],
- "title": "Chunks and Bloom size",
+ "title": "Errors Rate Planner",
"type": "timeseries"
},
{
@@ -351,13 +422,17 @@
"uid": "${loki_datasource}"
},
"description": "Blooms size vs uncompressed chunk size.",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
"gridPos": {
"h": 7,
"w": 17,
"x": 0,
- "y": 16
+ "y": 15
},
- "id": 51,
+ "id": 133,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
@@ -368,7 +443,7 @@
"sortOrder": "Descending",
"wrapLogMessage": false
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -376,12 +451,12 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\"",
+ "expr": "{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\"",
"queryType": "range",
"refId": "B"
}
],
- "title": "Errors",
+ "title": "Errors Builder",
"type": "logs"
},
{
@@ -402,6 +477,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
@@ -448,9 +524,9 @@
"h": 7,
"w": 7,
"x": 17,
- "y": 16
+ "y": 15
},
- "id": 53,
+ "id": 134,
"options": {
"legend": {
"calcs": [],
@@ -464,7 +540,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -472,12 +548,13 @@
"uid": "${loki_datasource}"
},
"editorMode": "code",
- "expr": "sum(count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} |= \"level=error\" |= \"component=bloom-compactor\" [$__auto]))",
+ "expr": "sum(count_over_time({cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} |= \"level=error\" |= \"component=bloom-builder\" [$__auto]))",
+ "legendFormat": "Error rate",
"queryType": "range",
"refId": "A"
}
],
- "title": "Errors Rate",
+ "title": "Errors Rate Builder",
"type": "timeseries"
},
{
@@ -486,7 +563,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 23
+ "y": 22
},
"id": 112,
"panels": [
@@ -507,9 +584,11 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -541,7 +620,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -549,100 +629,1054 @@
}
]
},
- "unit": "percentunit"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 24
- },
- "id": 114,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "unit": "none"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "B"
- }
- ],
- "title": "Progress per pod",
- "type": "timeseries"
- },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 64
+ },
+ "id": 125,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "loki_bloomplanner_inflight_tasks{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", quantile=\"0.95\"}",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "inflight p95",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Connected builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "IDLE Builders"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Builders processing task"
+ },
+ "properties": [
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 64
+ },
+ "id": 126,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_connected_builders{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Connected builders",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Builders processing task",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "count(loki_bloombuilder_processing_task{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} == 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "IDLE Builders",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Tasks per builder",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 72
+ },
+ "id": 81,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.75, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p75",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_series_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Series per task (includes series copied from other blocks)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 72
+ },
+ "id": 91,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes of chunk data processed per task\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes of chunk data processed per task\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# bytes of chunk data processed per task\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloombuilder_bytes_per_task_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Number of bytes from chunks added to blocks during each compaction.",
+ "type": "timeseries"
+ },
+ {
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 2,
+ "w": 24,
+ "x": 0,
+ "y": 79
+ },
+ "id": 117,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "Identify the tenant using the **_Progress by tenant_** panel from the overview and set the tenant variable",
+ "mode": "markdown"
+ },
+ "pluginVersion": "11.4.0-77663",
+ "title": "Tip",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/(success|failure)/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 100
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Planned"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 15
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Completed - failure"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Queued"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 81
+ },
+ "id": 114,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_tenant_tasks_planned{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Planned",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (status) (loki_bloomplanner_tenant_tasks_completed{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", tenant=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Completed - {{status}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(loki_bloomplanner_queue_length{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", user=\"$tenant\"}) > 0",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Queued",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Tasks per tenant",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 81
+ },
+ "id": 115,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": false
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=~\"$cluster\", job=~\"$namespace/bloom-planner\"}\n|= \"level=error\"\n|= \"tenant=$tenant\"",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Tenant errors",
+ "type": "logs"
+ }
+ ],
+ "title": "Tasks",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 95,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "How many tokens each builder is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
"fieldConfig": {
"defaults": {
- "fieldMinMax": false,
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "log"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "#EAB839",
- "value": 0
+ "color": "green",
+ "value": null
},
{
- "color": "green",
- "value": 100
+ "color": "red",
+ "value": 80
}
]
},
- "unit": "percentunit"
+ "unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
- "x": 12,
- "y": 24
+ "x": 0,
+ "y": 90
},
- "id": 115,
+ "id": 96,
"options": {
- "minVizHeight": 75,
- "minVizWidth": 75,
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "showThresholdLabels": false,
- "showThresholdMarkers": false,
- "sizing": "auto"
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
},
- "pluginVersion": "11.0.0-68102",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -650,60 +1684,137 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n loki_bloomcompactor_progress{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}\n)",
+ "expr": "sum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}))",
"hide": false,
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Per core",
"range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Total",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Current Progress per pod",
- "type": "gauge"
- }
- ],
- "title": "Progress per pod",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 24
- },
- "id": 56,
- "panels": [
+ "title": "Tokens rate",
+ "type": "timeseries"
+ },
{
- "description": "",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
"gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 90
},
- "id": 85,
+ "id": 97,
"options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "content": "",
- "mode": "markdown"
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
},
- "pluginVersion": "11.1.0-70005",
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "tokens/s by collision type",
+ "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Shows the expected number of cpu cores we need to provision to build blooms as fast as we ingest data so a compaction iteration doesn't take longer than the compaction interval.\n\nWe may decide to have more to speed up compaction.",
+ "description": "The sizes of the blooms created by the compactor. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
"fieldConfig": {
"defaults": {
"color": {
@@ -716,6 +1827,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -729,7 +1841,8 @@
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "type": "linear"
+ "log": 2,
+ "type": "log"
},
"showPoints": "auto",
"spanNulls": false,
@@ -748,19 +1861,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": []
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 26
+ "y": 98
},
- "id": 94,
+ "id": 98,
"options": {
"legend": {
"calcs": [],
@@ -774,7 +1892,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -782,12 +1900,12 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n)",
+ "expr": "histogram_quantile(\n 1.0,\n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Needed",
+ "legendFormat": "max",
"range": true,
- "refId": "B"
+ "refId": "D"
},
{
"datasource": {
@@ -795,15 +1913,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
"hide": false,
"instant": false,
- "legendFormat": "Available",
+ "legendFormat": "p99",
"range": true,
- "refId": "A"
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])\n )\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Required CPUs to not lag behind",
+ "title": "Bloom size",
"type": "timeseries"
},
{
@@ -811,7 +1942,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "How many chunks are we indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -824,8 +1955,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -834,9 +1966,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -866,17 +1995,17 @@
}
]
},
- "unit": "Bps"
+ "unit": "short"
},
"overrides": []
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 26
+ "y": 98
},
- "id": 72,
+ "id": 99,
"options": {
"legend": {
"calcs": [],
@@ -890,7 +2019,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -898,35 +2027,37 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval])) by (pod)\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (pod)",
- "hide": true,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloomcompactor_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-compactor\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval])) by (type)",
"hide": false,
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "MB/s per core",
+ "title": "Chunks indexed",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Bloom building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 56,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Shows the expected number of cpu cores we need to provision to build blooms as fast as we ingest data so a build iteration doesn't take longer than the build interval.\n\nWe may decide to have more to speed up building blooms.",
"fieldConfig": {
"defaults": {
"color": {
@@ -939,6 +2070,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -981,9 +2113,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 33
+ "y": 2030
},
- "id": 1,
+ "id": 94,
"options": {
"legend": {
"calcs": [],
@@ -997,7 +2129,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1005,10 +2137,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# This query shows the expected number of cpu cores we need to not fall behind\n# building blooms for data we're ingesting.\n# conceptually, the formula is:\n# (cell_bytes * space_amplification / bloom_bytes_processed_per_core)\n\n# number of replicas needed\nsum(avg_over_time(loki_cell:bytes:rate1m{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))\n*\n## Space amplification (how much data do we write compared to what we ingest?)\n(\n # rep factor\n 3 *\n sum(\n # 1 - dedupe_ratio\n 1 - \n sum(rate(loki_chunk_store_deduped_chunks_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n /\n sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (cluster, namespace)\n )\n)\n/\n(\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))\n)",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Needed",
"range": true,
"refId": "B"
},
@@ -1018,22 +2150,10 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "sum(rate(container_cpu_usage_seconds_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "instant": false,
- "legendFormat": "p99",
+ "legendFormat": "Available",
"range": true,
"refId": "A"
},
@@ -1043,28 +2163,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval])\n)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)\n*\ncount(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Avg",
+ "legendFormat": "Provisioned",
"range": true,
- "refId": "E"
+ "refId": "C"
}
],
- "title": "CPU",
+ "title": "Required CPUs to not lag behind",
"type": "timeseries"
},
{
@@ -1072,6 +2179,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1084,8 +2192,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1094,6 +2203,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1122,7 +2234,8 @@
"value": 80
}
]
- }
+ },
+ "unit": "Bps"
},
"overrides": []
},
@@ -1130,9 +2243,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 33
+ "y": 2030
},
- "id": 75,
+ "id": 72,
"options": {
"legend": {
"calcs": [],
@@ -1146,7 +2259,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1154,40 +2267,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}[$__rate_interval]))",
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
+ "expr": "# MB/s/core chunk data processed\nsum(rate(loki_bloombuilder_chunk_series_size_sum{cluster=~\"$cluster\", job=~\"$namespace/bloom-builder\"}[$__rate_interval]))\n/\nsum(rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "Request",
+ "legendFormat": "Total",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"cpu\"} > 0)",
- "hide": false,
- "instant": false,
- "legendFormat": "Limit",
- "range": true,
- "refId": "C"
}
],
- "title": "CPU per pod",
+ "title": "MB/s per core",
"type": "timeseries"
},
{
@@ -1207,6 +2295,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1239,14 +2328,9 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "bytes"
+ }
},
"overrides": []
},
@@ -1254,9 +2338,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 2037
},
- "id": 76,
+ "id": 1,
"options": {
"legend": {
"calcs": [],
@@ -1270,7 +2354,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1278,7 +2362,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1291,7 +2375,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1304,7 +2388,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"instant": false,
"legendFormat": "p99",
"range": true,
@@ -1316,7 +2400,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "p50",
@@ -1329,15 +2413,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"}\n)",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "Avg",
"range": true,
"refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "max(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Max",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Memory (workingset)",
+ "title": "CPU",
"type": "timeseries"
},
{
@@ -1357,6 +2454,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1395,43 +2493,17 @@
"value": 80
}
]
- },
- "unit": "bytes"
- },
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "bloom-compactor-106"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
}
- ]
+ },
+ "overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 2037
},
- "id": 5,
+ "id": 75,
"options": {
"legend": {
"calcs": [],
@@ -1445,7 +2517,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1453,7 +2525,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"})",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "{{pod}}",
"range": true,
@@ -1465,7 +2537,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\", resource=\"memory\"} > 0)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Request",
@@ -1478,7 +2550,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-compactor\"} > 0)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
"legendFormat": "Limit",
@@ -1486,7 +2558,7 @@
"refId": "C"
}
],
- "title": "Memory per pod (workingset)",
+ "title": "CPU per pod",
"type": "timeseries"
},
{
@@ -1494,7 +2566,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1507,8 +2578,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1517,9 +2589,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1549,46 +2618,111 @@
}
]
},
- "unit": "none"
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 2044
+ },
+ "id": 76,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
},
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 47
- },
- "id": 27,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "11.1.0-69868",
- "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n) > 0",
+ "expr": "max (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
"instant": false,
- "legendFormat": "Restarts",
+ "legendFormat": "Max",
"range": true,
- "refId": "A"
+ "refId": "F"
}
],
- "title": "Container restarts",
+ "title": "Memory (workingset)",
"type": "timeseries"
},
{
@@ -1596,7 +2730,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1609,8 +2742,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 15,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1619,9 +2753,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1651,7 +2782,7 @@
}
]
},
- "unit": "none"
+ "unit": "bytes"
},
"overrides": []
},
@@ -1659,9 +2790,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 47
+ "y": 2044
},
- "id": 77,
+ "id": 5,
"options": {
"legend": {
"calcs": [],
@@ -1675,7 +2806,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1683,36 +2814,48 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}\n )\n) > 0",
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"})",
"instant": false,
- "legendFormat": "{{reason}} / {{pod}}",
+ "legendFormat": "{{pod}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\", resource=\"memory\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Container restarts reason per pod",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
- }
- ],
- "title": "Resource Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 95,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many tokens each compactor is appending to blooms. Accounts for tokens that are not actually added to the blooms since they are already there. See the panel on the right for a drill down on the collision.\n",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1725,8 +2868,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1735,11 +2879,13 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "log"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -1764,17 +2910,18 @@
"value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
"x": 0,
- "y": 55
+ "y": 2051
},
- "id": 96,
+ "id": 27,
"options": {
"legend": {
"calcs": [],
@@ -1788,7 +2935,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1796,28 +2943,14 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens checked per pod, millions/s\nsum(rate(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))\n/\nsum(count(loki_bloom_tokens_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}))\n/ 1e6",
- "hide": false,
- "instant": false,
- "legendFormat": "Per core",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) / 1e6",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "Total",
+ "legendFormat": "Restarts",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "Tokens rate (millions)",
+ "title": "Container restarts",
"type": "timeseries"
},
{
@@ -1825,7 +2958,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter).\n\nType may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1838,8 +2971,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -1848,6 +2982,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1877,17 +3014,17 @@
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
"x": 12,
- "y": 55
+ "y": 2051
},
- "id": 97,
+ "id": 77,
"options": {
"legend": {
"calcs": [],
@@ -1901,7 +3038,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1909,23 +3046,35 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# tokens/s by type+collision\nsum by (collision) (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n) \n/ on () group_left\nsum (\n rate(loki_bloom_inserts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-builder\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "tokens/s by collision type",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Builder Resource Usage",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 118,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "The sizes of the blooms created by the compactor. We build one bloom per series. The more unique ngrams and chunks the series has, the bigger their blooms will be.",
"fieldConfig": {
"defaults": {
"color": {
@@ -1938,6 +3087,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1970,24 +3120,19 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
- },
- "unit": "bytes"
+ }
},
"overrides": []
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
"x": 0,
- "y": 63
+ "y": 2302
},
- "id": 98,
+ "id": 119,
"options": {
"legend": {
"calcs": [],
@@ -2001,7 +3146,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2009,12 +3154,12 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "p99",
+ "legendFormat": "Request",
"range": true,
- "refId": "D"
+ "refId": "B"
},
{
"datasource": {
@@ -2022,12 +3167,24 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(\n 0.90, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Limit",
"range": true,
- "refId": "E"
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile(\n 0.99,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "instant": false,
+ "legendFormat": "p99",
+ "range": true,
+ "refId": "A"
},
{
"datasource": {
@@ -2035,15 +3192,28 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(\n 0.50, \n sum by (le) (\n rate(loki_bloom_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "quantile(\n 0.50,\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "p50",
"range": true,
- "refId": "F"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg(\n rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval])\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Bloom size",
+ "title": "CPU",
"type": "timeseries"
},
{
@@ -2051,7 +3221,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "How many chunks are we indexing in the blooms. Either:\n- `copied` from a pre-existing bloom block, or \n- `iterated` through all its entries if processed for the first time.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2064,6 +3233,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2102,18 +3272,17 @@
"value": 80
}
]
- },
- "unit": "none"
+ }
},
"overrides": []
},
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 12,
"x": 12,
- "y": 63
+ "y": 2302
},
- "id": 99,
+ "id": 120,
"options": {
"legend": {
"calcs": [],
@@ -2127,7 +3296,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2135,31 +3304,42 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# chunks indexed, by iteration or copied from a pre-existing bloom\nsum(rate(loki_bloom_chunks_indexed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])) by (type)",
+ "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{pod}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "Request",
"range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_limits{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"cpu\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
}
- ],
- "title": "Chunks indexed",
- "type": "timeseries"
- }
- ],
- "title": "Bloom building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 103,
- "panels": [
+ ],
+ "title": "CPU per pod",
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "prometheus",
@@ -2177,6 +3357,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2209,9 +3390,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": []
},
@@ -2219,9 +3405,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 72
+ "y": 2309
},
- "id": 107,
+ "id": 121,
"options": {
"legend": {
"calcs": [],
@@ -2235,6 +3421,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2242,15 +3429,66 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
"hide": false,
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.99,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"}\n)",
+ "instant": false,
+ "legendFormat": "p99",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "quantile (\n 0.50,\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p50",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "avg (\n container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-builder\"}\n)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Avg",
+ "range": true,
+ "refId": "E"
}
],
- "title": "Created Blocks",
+ "title": "Memory (workingset)",
"type": "timeseries"
},
{
@@ -2258,7 +3496,6 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2271,6 +3508,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2303,9 +3541,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": []
},
@@ -2313,9 +3556,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 72
+ "y": 2309
},
- "id": 106,
+ "id": 122,
"options": {
"legend": {
"calcs": [],
@@ -2329,6 +3572,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2336,15 +3580,40 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"})",
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "{{pod}}",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\", resource=\"memory\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Request",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"bloom-planner\"} > 0)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Limit",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Deleted Blocks",
+ "title": "Memory per pod (workingset)",
"type": "timeseries"
},
{
@@ -2352,7 +3621,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2365,8 +3634,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2375,6 +3645,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2397,9 +3670,14 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": []
},
@@ -2407,9 +3685,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 79
+ "y": 2316
},
- "id": 109,
+ "id": 123,
"options": {
"legend": {
"calcs": [],
@@ -2423,6 +3701,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2430,36 +3709,22 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_blocks_reused_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n) > 0",
"instant": false,
- "legendFormat": "Blocks",
+ "legendFormat": "Restarts",
"range": true,
"refId": "A"
}
],
- "title": "Blocks reused",
+ "title": "Container restarts",
"type": "timeseries"
- }
- ],
- "title": "Blocks building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
- },
- "id": 110,
- "panels": [
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2472,8 +3737,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 15,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -2482,6 +3748,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -2504,19 +3773,24 @@
{
"color": "green",
"value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
]
- }
+ },
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 87
+ "x": 12,
+ "y": 2316
},
- "id": 108,
+ "id": 124,
"options": {
"legend": {
"calcs": [],
@@ -2530,7 +3804,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2538,23 +3812,35 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_created_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "(\n sum by (pod) (\n increase(\n kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}[10m]\n )\n )\n * on (pod) group_right\n max by (pod, reason) (\n kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-planner\"}\n )\n) > 0",
"instant": false,
- "legendFormat": "Metas",
+ "legendFormat": "{{reason}} / {{pod}}",
"range": true,
"refId": "A"
}
],
- "title": "Created Metas",
+ "title": "Container restarts reason per pod",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Planner Resource Usage",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 110,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2567,6 +3853,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2607,11 +3894,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 87
+ "w": 9,
+ "x": 0,
+ "y": 2497
},
- "id": 105,
+ "id": 108,
"options": {
"legend": {
"calcs": [],
@@ -2625,7 +3912,7 @@
"sort": "none"
}
},
- "pluginVersion": "11.1.0-69868",
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2633,7 +3920,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(increase(loki_bloomcompactor_metas_deleted_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "Metas",
@@ -2641,45 +3928,8 @@
"refId": "A"
}
],
- "title": "Deleted Metas",
+ "title": "Created Metas",
"type": "timeseries"
- }
- ],
- "title": "Metas building",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
- },
- "id": 80,
- "panels": [
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 95
- },
- "id": 93,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "title": "We use tenant sharding so each compactor will process a subset of the tenants.",
- "transparent": true,
- "type": "text"
},
{
"datasource": {
@@ -2689,39 +3939,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "mode": "thresholds"
},
"mappings": [],
"thresholds": {
@@ -2738,24 +3956,29 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 96
- },
- "id": 83,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "w": 3,
+ "x": 9,
+ "y": 2497
+ },
+ "id": 140,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2763,48 +3986,25 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_metas_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "table",
"hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
+ "instant": true,
+ "legendFormat": "Metas",
+ "range": false,
"refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[30m]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
}
],
- "title": "Tenants",
- "type": "timeseries"
+ "title": "Created Metas",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -2817,6 +4017,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2859,9 +4060,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 96
+ "y": 2497
},
- "id": 84,
+ "id": 105,
"options": {
"legend": {
"calcs": [],
@@ -2875,6 +4076,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2882,40 +4084,31 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenants_started_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_metas_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "Tenants per pod",
+ "title": "Deleted Metas",
"type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 103
- },
- "id": 86,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "title": "Number of tenant tables processed. ",
- "transparent": true,
- "type": "text"
- },
+ }
+ ],
+ "title": "Metas building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 103,
+ "panels": [
{
"datasource": {
"type": "prometheus",
@@ -2933,6 +4126,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2973,11 +4167,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
+ "w": 9,
"x": 0,
- "y": 104
+ "y": 2505
},
- "id": 88,
+ "id": 107,
"options": {
"legend": {
"calcs": [],
@@ -2991,6 +4185,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2998,48 +4193,91 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "Blocks",
"range": true,
"refId": "A"
+ }
+ ],
+ "title": "Created Blocks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 3,
+ "x": 9,
+ "y": 2505
+ },
+ "id": 139,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.50,\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloombuilder_blocks_created_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__range]))",
+ "format": "time_series",
"hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "B"
+ "instant": true,
+ "legendFormat": "Blocks",
+ "range": false,
+ "refId": "A"
}
],
- "title": "Tenant Tables",
- "type": "timeseries"
+ "title": "Created Blocks",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Compactors delete metas and blocks marked for deletion in the metas tombstones.",
"fieldConfig": {
"defaults": {
"color": {
@@ -3052,6 +4290,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3094,9 +4333,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 104
+ "y": 2505
},
- "id": 89,
+ "id": 106,
"options": {
"legend": {
"calcs": [],
@@ -3110,6 +4349,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3117,46 +4357,23 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n increase(\n loki_bloomcompactor_tenant_table_ranges_completed_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval]\n )\n)",
+ "expr": "sum by (phase) (increase(loki_bloomplanner_blocks_deleted_total{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "Deleted during {{phase}}",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "Tenant Tables per pod",
+ "title": "Deleted Blocks",
"type": "timeseries"
},
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 111
- },
- "id": 87,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "title": "Series per compaction (includes series copied from other blocks)",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Number of overlapping bloom blocks reused when creating new blocks\n",
"fieldConfig": {
"defaults": {
"color": {
@@ -3169,6 +4386,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3211,9 +4429,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 112
+ "y": 2512
},
- "id": 81,
+ "id": 109,
"options": {
"legend": {
"calcs": [],
@@ -3227,6 +4445,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3234,86 +4453,61 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_series_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
+ "expr": "sum(increase(loki_bloombuilder_blocks_reused_total{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p50",
+ "legendFormat": "Blocks",
"range": true,
"refId": "A"
}
],
- "title": "Series",
+ "title": "Blocks reused",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Blocks building",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 135,
+ "panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Is the retention currently running?",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "fieldMinMax": false,
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "yellow",
+ "index": 0,
+ "text": "No"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "Yes"
+ }
+ },
+ "type": "value"
}
- },
- "mappings": [],
+ ],
+ "max": 0,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -3322,30 +4516,36 @@
"value": null
}
]
- }
+ },
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 112
+ "w": 3,
+ "x": 0,
+ "y": 2573
},
- "id": 82,
+ "id": 136,
"options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3353,45 +4553,23 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_series_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_series_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "{{pod}}",
- "range": true,
- "refId": "C"
+ "exemplar": false,
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
}
],
- "title": "avg series per compaction by pod",
- "type": "timeseries"
- },
- {
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 119
- },
- "id": 90,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "title": "Number of bytes from chunks added to blocks during each compaction.",
- "transparent": true,
- "type": "text"
+ "title": "Running now?",
+ "type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "Is the retention currently running?",
"fieldConfig": {
"defaults": {
"color": {
@@ -3403,7 +4581,10 @@
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
+ "axisSoftMax": 1,
+ "axisSoftMin": 0,
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3429,7 +4610,9 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [],
+ "max": 2,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -3439,17 +4622,17 @@
}
]
},
- "unit": "bytes"
+ "unit": "bool_yes_no"
},
"overrides": []
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 120
+ "w": 9,
+ "x": 3,
+ "y": 2573
},
- "id": 91,
+ "id": 137,
"options": {
"legend": {
"calcs": [],
@@ -3458,11 +4641,11 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3470,41 +4653,13 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.99, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.9, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "# series checked per compaction\nhistogram_quantile(\n 0.5, \n sum by (le) (\n rate(loki_bloomcompactor_bytes_per_compaction_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
+ "expr": "sum by (cluster, namespace) (loki_bloomplanner_retention_running{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"})",
+ "legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
- "title": "Bytes",
+ "title": "Retention running",
"type": "timeseries"
},
{
@@ -3512,6 +4667,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "How much time applying retention took",
"fieldConfig": {
"defaults": {
"color": {
@@ -3524,6 +4680,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3549,6 +4706,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -3559,7 +4717,7 @@
}
]
},
- "unit": "bytes"
+ "unit": "dtdurations"
},
"overrides": []
},
@@ -3567,9 +4725,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 120
+ "y": 2573
},
- "id": 92,
+ "id": 138,
"options": {
"legend": {
"calcs": [],
@@ -3578,11 +4736,11 @@
"showLegend": true
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3590,19 +4748,17 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (\n rate(loki_bloomcompactor_bytes_per_compaction_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n /\n rate(loki_bloomcompactor_bytes_per_compaction_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"}[$__rate_interval])\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p99",
+ "expr": "histogram_quantile(0.9, \n sum by (status, le) (\n rate(loki_bloomplanner_retention_time_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"}[$__rate_interval])\n )\n)",
+ "legendFormat": "__auto",
"range": true,
- "refId": "C"
+ "refId": "A"
}
],
- "title": "avg bytes per compaction by pod",
+ "title": "Retention time",
"type": "timeseries"
}
],
- "title": "Data processed",
+ "title": "Retention",
"type": "row"
},
{
@@ -3613,7 +4769,7 @@
"x": 0,
"y": 29
},
- "id": 58,
+ "id": 62,
"panels": [
{
"description": "",
@@ -3622,22 +4778,49 @@
"overrides": []
},
"gridPos": {
- "h": 3,
+ "h": 4,
"w": 24,
"x": 0,
- "y": 82
+ "y": 2581
+ },
+ "id": 71,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "During the planning phase, the planner downloads the metas and TSDBs to build the plan.\n\nOnce all blocks and metas are built, the builder flushes them to the object store.\n\nAfter each iteration, the planner deletes the metas and blocks marked for deletion in the tombstones.",
+ "mode": "markdown"
+ },
+ "pluginVersion": "11.4.0-77663",
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 2,
+ "x": 0,
+ "y": 2585
},
- "id": 47,
+ "id": 63,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "\nCompactors write blocks to the attached PVs before flushing them into the object store.\nIt also download chunks and index files.\n\nAfter compacting a given tenant, all the downloaded index files and chunks, as well as the already flushed blocks are deleted.",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"title": "",
"transparent": true,
"type": "text"
@@ -3647,6 +4830,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3659,8 +4843,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3681,35 +4866,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.8
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2585
},
- "id": 9,
+ "id": 61,
"options": {
"legend": {
"calcs": [],
@@ -3723,6 +4903,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3730,40 +4911,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n max by(persistentvolumeclaim) (\n kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} \n / \n kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}\n ) \n and \n count by(persistentvolumeclaim) (\n kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"}\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Utilization",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -3771,6 +4927,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3783,8 +4940,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3805,35 +4963,30 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "area"
+ "mode": "off"
}
},
"mappings": [],
- "max": 1,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 0.8
+ "color": "green",
+ "value": null
}
]
},
- "unit": "percentunit"
+ "unit": "s"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 85
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2585
},
- "id": 100,
+ "id": 64,
"options": {
"legend": {
"calcs": [],
@@ -3847,6 +5000,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3854,14 +5008,41 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"bloom-compactor\"})",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Utilization per pod",
+ "title": "Latency Planner",
"type": "timeseries"
},
{
@@ -3869,6 +5050,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -3881,8 +5063,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -3911,25 +5094,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 93
+ "w": 11,
+ "x": 2,
+ "y": 2592
},
- "id": 7,
+ "id": 127,
"options": {
"legend": {
"calcs": [],
@@ -3943,6 +5123,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3950,40 +5131,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (container, status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_written_bytes_total[$__rate_interval])\n ) \n + ignoring(pod) group_right() \n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Writes",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -3991,6 +5147,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4003,8 +5160,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4033,25 +5191,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 93
+ "w": 11,
+ "x": 13,
+ "y": 2592
},
- "id": 101,
+ "id": 128,
"options": {
"legend": {
"calcs": [],
@@ -4065,6 +5220,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4072,21 +5228,76 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Writes per pod",
+ "title": "Latency Planner",
"type": "timeseries"
},
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 2,
+ "x": 0,
+ "y": 2598
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### S3\n",
+ "mode": "markdown"
+ },
+ "pluginVersion": "11.4.0-77663",
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4099,8 +5310,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4129,25 +5341,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 2599
},
- "id": 8,
+ "id": 67,
"options": {
"legend": {
"calcs": [],
@@ -4161,6 +5370,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4168,40 +5378,15 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "quantile(\n 0.99,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "instant": false,
- "legendFormat": "p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.90,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "p90",
+ "legendFormat": "{{operation}} {{status_code}}",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "quantile(\n 0.50,\n sum by(instance, pod, device) (\n rate(node_disk_read_bytes_total[$__rate_interval])\n ) + ignoring(pod) group_right()\n (\n label_replace(\n count by(instance, pod, device) (\n container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}\n ), \n \"device\", \"$1\", \"device\", \"/dev/(.*)\"\n ) * 0\n )\n)",
- "hide": false,
- "instant": false,
- "legendFormat": "p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Disk Reads",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4209,6 +5394,7 @@
"type": "prometheus",
"uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -4221,8 +5407,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 0,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4251,25 +5438,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "s"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 100
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 2599
},
- "id": 102,
+ "id": 69,
"options": {
"legend": {
"calcs": [],
@@ -4283,6 +5467,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4290,83 +5475,42 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-compactor\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "legendFormat": "{{operation}} p99",
"range": true,
- "refId": "A"
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\", job=\"\"} [$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
+ "refId": "F"
}
],
- "title": "Disk Reads per pod",
+ "title": "Latency Planner",
"type": "timeseries"
- }
- ],
- "title": "Disk Usage",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 62,
- "panels": [
- {
- "description": "",
- "fieldConfig": {
- "defaults": {},
- "overrides": []
- },
- "gridPos": {
- "h": 3,
- "w": 24,
- "x": 0,
- "y": 83
- },
- "id": 71,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "Once all blocks and metas are built locally, the compactor flushes them to the object store.\n\nAfter each iteration, the compactor deletes the metas and blocks marked for deletion in the tombstones.",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-69747",
- "title": "",
- "transparent": true,
- "type": "text"
- },
- {
- "description": "",
- "fieldConfig": {
- "defaults": {},
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 86
- },
- "id": 63,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-69747",
- "title": "",
- "transparent": true,
- "type": "text"
},
{
"datasource": {
@@ -4386,6 +5530,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4416,7 +5561,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4428,9 +5574,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 86
+ "y": 2606
},
- "id": 61,
+ "id": 129,
"options": {
"legend": {
"calcs": [],
@@ -4444,6 +5590,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4451,7 +5598,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4459,7 +5606,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4480,6 +5627,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4510,7 +5658,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4522,9 +5671,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 86
+ "y": 2606
},
- "id": 64,
+ "id": 130,
"options": {
"legend": {
"calcs": [],
@@ -4538,6 +5687,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4545,7 +5695,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4558,7 +5708,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4571,7 +5721,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4579,7 +5729,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
},
{
@@ -4592,19 +5742,19 @@
"h": 7,
"w": 2,
"x": 0,
- "y": 93
+ "y": 2612
},
- "id": 65,
+ "id": 66,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### Azure\nBlob Storage",
"mode": "markdown"
},
- "pluginVersion": "11.1.0-69747",
+ "pluginVersion": "11.4.0-77663",
"title": "",
"transparent": true,
"type": "text"
@@ -4627,6 +5777,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4657,7 +5808,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4669,9 +5821,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 93
+ "y": 2613
},
- "id": 67,
+ "id": 68,
"options": {
"legend": {
"calcs": [],
@@ -4685,6 +5837,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4692,7 +5845,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4700,7 +5853,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Planner",
"type": "timeseries"
},
{
@@ -4721,6 +5874,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4751,7 +5905,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4763,9 +5918,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 93
+ "y": 2613
},
- "id": 69,
+ "id": 70,
"options": {
"legend": {
"calcs": [],
@@ -4779,6 +5934,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4786,7 +5942,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -4799,7 +5955,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4812,7 +5968,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-planner\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4820,36 +5976,9 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Planner",
"type": "timeseries"
},
- {
- "description": "",
- "fieldConfig": {
- "defaults": {},
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 100
- },
- "id": 66,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### Azure\nBlob Storage",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-69747",
- "title": "",
- "transparent": true,
- "type": "text"
- },
{
"datasource": {
"type": "prometheus",
@@ -4868,6 +5997,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4898,7 +6028,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4910,9 +6041,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 100
+ "y": 2620
},
- "id": 68,
+ "id": 131,
"options": {
"legend": {
"calcs": [],
@@ -4926,6 +6057,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -4933,7 +6065,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
@@ -4941,7 +6073,7 @@
"refId": "B"
}
],
- "title": "QPS",
+ "title": "QPS Builder",
"type": "timeseries"
},
{
@@ -4962,6 +6094,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4992,7 +6125,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -5004,9 +6138,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 100
+ "y": 2620
},
- "id": 70,
+ "id": 132,
"options": {
"legend": {
"calcs": [],
@@ -5020,6 +6154,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -5027,7 +6162,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p99",
@@ -5040,7 +6175,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -5053,7 +6188,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-compactor\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-builder\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -5061,7 +6196,7 @@
"refId": "F"
}
],
- "title": "Latency",
+ "title": "Latency Builder",
"type": "timeseries"
}
],
@@ -5069,18 +6204,18 @@
"type": "row"
}
],
+ "preload": false,
"refresh": "",
- "schemaVersion": 39,
+ "schemaVersion": 40,
"tags": [],
"time": {
- "from": "now-2d",
+ "from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {},
"timezone": "",
- "title": "Bloom-Compactor New",
- "uid": "bdeqksjzwxqf4e",
- "version": 62,
+ "title": "Bloom Build",
+ "uid": "cdqj1p7oe1i4ge",
+ "version": 88,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/production/loki-mixin/dashboards/dashboard-bloom-gateway.json b/production/loki-mixin/dashboards/dashboard-bloom-gateway.json
index 12492f8f00bb5..3fe816800b5e6 100644
--- a/production/loki-mixin/dashboards/dashboard-bloom-gateway.json
+++ b/production/loki-mixin/dashboards/dashboard-bloom-gateway.json
@@ -19,7 +19,6 @@
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"links": [],
- "liveNow": false,
"panels": [
{
"collapsed": false,
@@ -43,8 +42,7 @@
"fieldConfig": {
"defaults": {
"color": {
- "mode": "thresholds",
- "seriesBy": "last"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -53,9 +51,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
- "gradientMode": "none",
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -66,7 +65,7 @@
"lineStyle": {
"fill": "solid"
},
- "lineWidth": 2,
+ "lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
@@ -111,7 +110,7 @@
},
"gridPos": {
"h": 6,
- "w": 6,
+ "w": 12,
"x": 0,
"y": 1
},
@@ -129,6 +128,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -136,7 +136,8 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
"legendFormat": "Chunks",
"range": true,
@@ -149,7 +150,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
"legendFormat": "Series",
@@ -157,7 +158,7 @@
"refId": "B"
}
],
- "title": "Filter ratio",
+ "title": "Filter ratio - Bloom Gateway (server)",
"type": "timeseries"
},
{
@@ -202,7 +203,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 6,
+ "x": 12,
"y": 1
},
"id": 75,
@@ -221,7 +222,7 @@
"showThresholdMarkers": true,
"sizing": "auto"
},
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -230,7 +231,7 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_chunks_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"instant": true,
"legendFormat": "Chunks",
"range": false,
@@ -243,40 +244,12 @@
},
"editorMode": "code",
"exemplar": false,
- "expr": "sum(rate(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"}[$__rate_interval]))",
+ "expr": "sum(increase(loki_bloom_gateway_filtered_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_requested_series_sum{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__range]))",
"hide": false,
"instant": true,
"legendFormat": "Series",
"range": false,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_chunks_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_chunks_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Chunks avg",
- "range": false,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum(loki_bloom_gateway_filtered_series_sum{job=\"$namespace/bloom-gateway\"})\n/\nsum(loki_bloom_gateway_requested_series_sum{job=\"$namespace/bloom-gateway\"})",
- "hide": true,
- "instant": true,
- "legendFormat": "Series avg",
- "range": false,
- "refId": "D"
}
],
"title": "Filter ratio",
@@ -300,6 +273,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -376,7 +350,7 @@
"gridPos": {
"h": 6,
"w": 6,
- "x": 12,
+ "x": 18,
"y": 1
},
"id": 72,
@@ -393,6 +367,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -407,19 +382,6 @@
"range": true,
"refId": "D"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(kube_pod_container_status_ready{container=\"bloom-gateway\", cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Running",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -432,19 +394,6 @@
"legendFormat": "Desired",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "kube_statefulset_status_replicas_available{cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"bloom-gateway\"}",
- "hide": true,
- "instant": false,
- "legendFormat": "Available",
- "range": true,
- "refId": "C"
}
],
"title": "Readiness",
@@ -455,11 +404,11 @@
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
+ "description": "Percentage of chunks that are filtered by using bloom filters",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
+ "mode": "thresholds"
},
"custom": {
"axisBorderShow": false,
@@ -468,9 +417,10 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 50,
- "gradientMode": "none",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
"hideFrom": {
"legend": false,
"tooltip": false,
@@ -493,34 +443,44 @@
"mode": "none"
},
"thresholdsStyle": {
- "mode": "off"
+ "mode": "area"
}
},
"mappings": [],
+ "max": 1,
+ "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "red",
"value": null
},
{
- "color": "red",
- "value": 80
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.9
}
]
},
- "unit": "none"
+ "unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 6,
- "w": 6,
- "x": 18,
- "y": 1
+ "w": 12,
+ "x": 0,
+ "y": 7
},
- "id": 37,
+ "id": 93,
"options": {
"legend": {
"calcs": [],
@@ -534,6 +494,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -541,76 +502,134 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "Chunks",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "interval": "",
- "legendFormat": "{{pod}} ({{reason}})",
+ "legendFormat": "Series",
"range": true,
- "refId": "C"
+ "refId": "B"
}
],
- "title": "Container restarts",
+ "title": "Filter ratio - Index Gateway (client)",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.9
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
},
"gridPos": {
- "h": 9,
- "w": 15,
- "x": 0,
+ "h": 6,
+ "w": 6,
+ "x": 12,
"y": 7
},
- "id": 48,
+ "id": 94,
"options": {
- "dedupStrategy": "none",
- "enableLogDetails": true,
- "prettifyLogMessage": false,
- "showCommonLabels": false,
- "showLabels": false,
- "showTime": false,
- "sortOrder": "Descending",
- "wrapLogMessage": true
+ "minVizHeight": 75,
+ "minVizWidth": 75,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true,
+ "sizing": "auto"
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_chunks_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "instant": true,
+ "legendFormat": "Chunks",
+ "range": false,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
- "hide": true,
- "queryType": "range",
+ "exemplar": false,
+ "expr": "sum(increase(loki_bloom_gateway_querier_series_filtered_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))\n/\nsum(increase(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__range]))",
+ "hide": false,
+ "instant": true,
+ "legendFormat": "Series",
+ "range": false,
"refId": "B"
}
],
- "title": "Errors",
- "type": "logs"
+ "title": "Filter ratio",
+ "type": "gauge"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
+ "description": "",
"fieldConfig": {
"defaults": {
"color": {
- "fixedColor": "red",
- "mode": "fixed"
+ "mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
@@ -619,8 +638,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
- "drawStyle": "bars",
- "fillOpacity": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -629,11 +649,13 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
- "log": 2,
- "type": "symlog"
+ "type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
@@ -655,66 +677,21 @@
},
{
"color": "red",
- "value": 1
- }
- ]
- }
- },
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "warn"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "orange",
- "mode": "fixed"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byName",
- "options": "error"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "red",
- "mode": "fixed"
- }
+ "value": 80
}
]
},
- {
- "matcher": {
- "id": "byName",
- "options": "panic"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "semi-dark-red",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
+ "unit": "none"
+ },
+ "overrides": []
},
"gridPos": {
- "h": 9,
- "w": 9,
- "x": 15,
+ "h": 6,
+ "w": 6,
+ "x": 18,
"y": 7
},
- "id": 52,
+ "id": 37,
"options": {
"legend": {
"calcs": [],
@@ -728,42 +705,925 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
- "legendFormat": "{{ level }}",
- "queryType": "range",
- "refId": "A"
- },
- {
- "datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "expr": "(\n max by (pod, reason) (kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"})\n * on (pod) group_left\n sum by (pod) (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))\n) > 0",
"hide": false,
- "legendFormat": "panic",
- "queryType": "range",
- "refId": "B"
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}} ({{reason}})",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Errors Rate",
+ "title": "Container restarts",
"type": "timeseries"
},
{
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 16
- },
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "Percentage of chunks that are filtered by using bloom filters",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "orange",
+ "value": 0.5
+ },
+ {
+ "color": "yellow",
+ "value": 0.75
+ },
+ {
+ "color": "green",
+ "value": 0.9
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "id": 99,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "(\n sum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n -\n sum(rate(loki_index_gateway_postfilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)\n)\n/\nsum(rate(loki_index_gateway_prefilter_chunks_sum{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval])) by (route)",
+ "instant": false,
+ "legendFormat": "chunks {{ route}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Filter ratio - Index Gateway by route",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "area"
+ }
+ },
+ "mappings": [],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "#EAB839",
+ "value": 0.1
+ },
+ {
+ "color": "#EF843C",
+ "value": 0.25
+ },
+ {
+ "color": "red",
+ "value": 0.5
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "id": 100,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_series_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_series_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "series",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(rate(loki_bloom_gateway_querier_chunks_skipped_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))\n/\nsum(rate(loki_bloom_gateway_querier_chunks_total{cluster=\"$cluster\", job=\"$namespace/index-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "chunks",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Data skipped because they don't match any bocks",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 96,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 447
+ },
+ "id": 97,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000134"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000134"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Found/Skipped/Missing chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 447
+ },
+ "id": 98,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000134"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_chunks_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered chunks",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "percent"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 454
+ },
+ "id": 107,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000134"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=~\"(found|skipped|missed)\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"requested\"}[$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Found/Skipped/Missing series",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 50,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 454
+ },
+ "id": 108,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000134"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"filtered\"}[$__rate_interval])) by (type)\n/ on () group_left\nsum(rate(loki_bloom_recorder_series_total{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\", type=\"found\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Filtered series",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Bloom Recorder",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 95,
+ "panels": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 15,
+ "x": 0,
+ "y": 920
+ },
+ "id": 48,
+ "options": {
+ "dedupStrategy": "none",
+ "enableLogDetails": true,
+ "prettifyLogMessage": false,
+ "showCommonLabels": false,
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=error\" or \"panic:\" | logfmt",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"level=warn\" | logfmt",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors",
+ "type": "logs"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "bars",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "symlog"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 1
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "warn"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "panic"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "semi-dark-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 9,
+ "x": 15,
+ "y": 920
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77765",
+ "targets": [
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (level) (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |~ \"level=(warn|error)\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "loki",
+ "uid": "${loki_datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum (count_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"panic:\" | logfmt [$__auto]))",
+ "queryType": "range",
+ "refId": "B"
+ }
+ ],
+ "title": "Errors Rate",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Logs",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
"id": 56,
"panels": [
{
@@ -783,6 +1643,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -813,8 +1674,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -829,7 +1689,7 @@
"h": 14,
"w": 12,
"x": 0,
- "y": 17
+ "y": 1764
},
"id": 10,
"options": {
@@ -845,6 +1705,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -905,6 +1766,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -935,8 +1797,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -952,7 +1813,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 17
+ "y": 1764
},
"id": 11,
"options": {
@@ -968,6 +1829,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1029,6 +1891,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1059,8 +1922,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1076,7 +1938,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 24
+ "y": 2140
},
"id": 81,
"options": {
@@ -1092,6 +1954,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1167,6 +2030,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1200,8 +2064,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1216,7 +2079,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 31
+ "y": 2147
},
"id": 87,
"options": {
@@ -1232,6 +2095,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1268,6 +2132,109 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 2147
+ },
+ "id": 88,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "GC duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1301,8 +2268,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1317,10 +2283,10 @@
"gridPos": {
"h": 7,
"w": 8,
- "x": 8,
- "y": 31
+ "x": 16,
+ "y": 2147
},
- "id": 88,
+ "id": 89,
"options": {
"legend": {
"calcs": [],
@@ -1334,6 +2300,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1341,14 +2308,26 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (pod) (rate(go_gc_duration_seconds_sum{container=\"bloom-gateway\"}[$__rate_interval]))\n/\nsum by (pod) (rate(go_gc_duration_seconds_count{container=\"bloom-gateway\"}[$__rate_interval]))",
+ "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "B"
}
],
- "title": "GC duration",
+ "title": "GC pauses",
"type": "timeseries"
},
{
@@ -1369,6 +2348,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1379,9 +2359,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -1402,8 +2379,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1411,17 +2387,17 @@
}
]
},
- "unit": "s"
+ "unit": "binBps"
},
"overrides": []
},
"gridPos": {
"h": 7,
- "w": 8,
- "x": 16,
- "y": 31
+ "w": 12,
+ "x": 0,
+ "y": 2154
},
- "id": 89,
+ "id": 84,
"options": {
"legend": {
"calcs": [],
@@ -1435,6 +2411,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1442,30 +2419,21 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(go_gc_pauses_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
- "legendFormat": "__auto",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{pod}}",
"range": true,
- "refId": "B"
+ "refId": "D"
}
],
- "title": "GC pauses",
+ "title": "Disk reads",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1482,6 +2450,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1512,8 +2481,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1528,10 +2496,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 38
+ "x": 12,
+ "y": 2154
},
- "id": 84,
+ "id": 85,
"options": {
"legend": {
"calcs": [],
@@ -1545,6 +2513,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1552,20 +2521,20 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
"hide": false,
"instant": false,
- "interval": "",
"legendFormat": "{{pod}}",
"range": true,
"refId": "D"
}
],
- "title": "Disk reads",
+ "title": "Disk writes",
"type": "timeseries"
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1582,6 +2551,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -1601,7 +2571,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -1612,8 +2582,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1627,11 +2596,11 @@
},
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 38
+ "w": 24,
+ "x": 0,
+ "y": 2161
},
- "id": 85,
+ "id": 102,
"options": {
"legend": {
"calcs": [],
@@ -1645,6 +2614,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1652,15 +2622,29 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by(instance, pod) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ ignoring(pod) group_right() \n(count by(instance, pod) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)",
+ "expr": "sum(sum by (instance) (rate(node_disk_read_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by (instance) (container_fs_reads_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0))",
"hide": false,
"instant": false,
- "legendFormat": "{{pod}}",
+ "interval": "",
+ "legendFormat": "Reads",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(sum by(instance) (rate(node_disk_written_bytes_total[$__rate_interval]))\n+ on(instance) group_right() \n(count by(instance) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"bloom-gateway\", device!~\".*sda.*\"}) * 0)) * -1",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Writes",
+ "range": true,
+ "refId": "A"
}
],
- "title": "Disk writes",
+ "title": "Disk reads/writes",
"type": "timeseries"
}
],
@@ -1673,7 +2657,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 17
+ "y": 22
},
"id": 2,
"panels": [
@@ -1694,6 +2678,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -1724,8 +2709,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1781,7 +2765,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 18
+ "y": 1175
},
"id": 13,
"options": {
@@ -1797,6 +2781,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1804,7 +2789,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
+ "expr": "sum by (status_code) (\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\",job=\"$namespace/bloom-gateway\", route=\"/logproto.BloomGateway/FilterChunkRefs\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -1832,6 +2817,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
@@ -1862,8 +2848,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
}
@@ -1919,7 +2904,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 18
+ "y": 1175
},
"id": 86,
"options": {
@@ -1935,6 +2920,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -1955,6 +2941,7 @@
},
{
"datasource": {
+ "default": false,
"type": "prometheus",
"uid": "${datasource}"
},
@@ -1971,6 +2958,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2001,8 +2989,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2014,7 +3001,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 25
+ "y": 1249
},
"id": 14,
"options": {
@@ -2030,6 +3017,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2069,6 +3057,19 @@
"legendFormat": "{{ route }} 99th percentile",
"range": true,
"refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"/logproto.BloomGateway/FilterChunkRefs\"}))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{ route }} max",
+ "range": true,
+ "refId": "A"
}
],
"title": "Latency",
@@ -2091,6 +3092,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -2121,8 +3123,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
}
]
},
@@ -2134,7 +3135,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 25
+ "y": 1249
},
"id": 15,
"options": {
@@ -2150,6 +3151,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2178,7 +3180,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 18
+ "y": 23
},
"id": 58,
"panels": [
@@ -2200,6 +3202,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -2230,7 +3233,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2242,7 +3246,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 11
+ "y": 1176
},
"id": 16,
"options": {
@@ -2258,33 +3262,8 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum by (pod) (loki_bloom_gateway_queue_duration_seconds_sum{cluster=\"$cluster\", namespace=\"$namespace\"})\n/\nsum by (pod) (loki_bloom_gateway_queue_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"})\n",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum (loki_bloom_gateway_queue_length{cluster=\"$cluster\", namespace=\"$namespace\"})",
- "hide": true,
- "instant": false,
- "legendFormat": "Total",
- "range": true,
- "refId": "D"
- },
{
"datasource": {
"type": "prometheus",
@@ -2320,6 +3299,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2350,7 +3330,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2362,7 +3343,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 11
+ "y": 1176
},
"id": 17,
"options": {
@@ -2378,6 +3359,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2392,19 +3374,6 @@
"range": true,
"refId": "E"
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (le) (rate(loki_bloom_gateway_queue_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))",
- "hide": true,
- "instant": false,
- "legendFormat": "p90",
- "range": true,
- "refId": "A"
- },
{
"datasource": {
"type": "prometheus",
@@ -2453,6 +3422,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2483,7 +3453,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -2495,7 +3466,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 11
+ "y": 1176
},
"id": 22,
"options": {
@@ -2511,6 +3482,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2539,7 +3511,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 19
+ "y": 24
},
"id": 68,
"panels": [
@@ -2561,6 +3533,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2607,7 +3580,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 12
+ "y": 1177
},
"id": 69,
"options": {
@@ -2623,6 +3596,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2684,6 +3658,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2730,7 +3705,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 12
+ "y": 1177
},
"id": 70,
"options": {
@@ -2746,6 +3721,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2807,6 +3783,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -2852,7 +3829,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 12
+ "y": 1177
},
"id": 71,
"options": {
@@ -2868,6 +3845,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -2887,15 +3865,114 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
+ "expr": "sum by (status) (rate(loki_bloom_gateway_tasks_processed_total{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "processed {{status}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Tasks dequeued/processed",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 1214
+ },
+ "id": 105,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.4.0-77663",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_gateway_process_duration_seconds_count{cluster=\"$cluster\",namespace=\"$namespace\",container=\"bloom-gateway\"}[$__rate_interval])) by (status)",
"instant": false,
- "legendFormat": "processed {{status}}",
+ "legendFormat": "{{status}}",
"range": true,
- "refId": "B"
+ "refId": "A"
}
],
- "title": "Tasks dequeued/processed",
+ "title": "Worker Iterations per second",
"type": "timeseries"
}
],
@@ -2908,21 +3985,21 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 20
+ "y": 25
},
"id": 59,
"panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 13
+ "y": 1178
},
"id": 19,
"options": {
@@ -2934,7 +4011,7 @@
"content": "",
"mode": "markdown"
},
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77663",
"title": "We cache bloom blocks in memory to prevent the gateway from hitting the object store too often",
"transparent": true,
"type": "text"
@@ -2957,6 +4034,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3003,7 +4081,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 14
+ "y": 1179
},
"id": 20,
"options": {
@@ -3019,6 +4097,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3068,6 +4147,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3160,7 +4240,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 14
+ "y": 1179
},
"id": 83,
"options": {
@@ -3176,6 +4256,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3251,6 +4332,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 100,
"gradientMode": "none",
@@ -3329,7 +4411,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 21
+ "y": 1186
},
"id": 92,
"options": {
@@ -3345,6 +4427,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3352,7 +4435,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{container=\"bloom-gateway\"}[$__rate_interval])\n)",
+ "expr": "sum by (status) (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)\n/ ignoring(status) group_left\nsum (\n rate(loki_bloom_blocks_cache_fetched_total{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])\n)",
"hide": false,
"instant": false,
"legendFormat": "__auto",
@@ -3381,6 +4464,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3440,7 +4524,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 28
+ "y": 1193
},
"id": 76,
"options": {
@@ -3456,6 +4540,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3504,6 +4589,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3554,257 +4640,18 @@
{
"id": "unit",
"value": "bytes"
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 28
- },
- "id": 21,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "maxHeight": 600,
- "mode": "single",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "metas fetch rate",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
- "hide": false,
- "instant": false,
- "legendFormat": "blocks fetch rate",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 blocks size",
- "range": true,
- "refId": "C"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": false,
- "instant": false,
- "legendFormat": "p90 metas size",
- "range": true,
- "refId": "D"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "E"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "F"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
- "hide": true,
- "instant": false,
- "legendFormat": "__auto",
- "range": true,
- "refId": "G"
- }
- ],
- "title": "Bloom Store",
- "type": "timeseries"
- }
- ],
- "title": "Blocks Cache",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 21
- },
- "id": 60,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 14
- },
- "id": 61,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "title": "The gateway download bloom meta files and blocks from the object store.",
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "gridPos": {
- "h": 7,
- "w": 2,
- "x": 0,
- "y": 15
- },
- "id": 24,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "---\n#### GCS\n",
- "mode": "markdown"
- },
- "pluginVersion": "11.1.0-70005",
- "transparent": true,
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "description": "",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 25,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "normal"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
- "w": 11,
- "x": 2,
- "y": 15
+ "w": 12,
+ "x": 12,
+ "y": 1193
},
- "id": 25,
+ "id": 21,
"options": {
"legend": {
"calcs": [],
@@ -3818,6 +4665,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3825,14 +4673,93 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum(rate(loki_bloom_store_metas_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
"instant": false,
- "legendFormat": "{{operation}} {{status_code}}",
+ "legendFormat": "metas fetch rate",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(loki_bloom_store_blocks_fetched_sum{cluster=\"$cluster\",namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "blocks fetch rate",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_blocks_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 blocks size",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(loki_bloom_store_metas_fetched_size_bytes_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p90 metas size",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(1.0, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.5, sum(rate(loki_bloom_store_metas_fetched_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])) by (le))",
+ "hide": true,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "G"
}
],
- "title": "QPS",
+ "title": "Bloom Store",
"type": "timeseries"
},
{
@@ -3853,6 +4780,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -3863,6 +4791,9 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -3878,6 +4809,7 @@
"mode": "off"
}
},
+ "fieldMinMax": false,
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -3887,17 +4819,30 @@
}
]
},
- "unit": "none"
+ "unit": "short"
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/Size (.*)/"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
- "w": 11,
- "x": 13,
- "y": 15
+ "w": 12,
+ "x": 0,
+ "y": 1200
},
- "id": 29,
+ "id": 101,
"options": {
"legend": {
"calcs": [],
@@ -3911,6 +4856,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77663",
"targets": [
{
"datasource": {
@@ -3918,65 +4864,83 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "instant": false,
- "legendFormat": "{{operation}} p99",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "sum by (job)(rate(loki_bloom_store_download_queue_size_sum{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval]))",
"hide": false,
"instant": false,
- "legendFormat": "{{operation}} p90",
+ "interval": "",
+ "legendFormat": "Size",
"range": true,
"refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
- "hide": false,
- "instant": false,
- "legendFormat": "{{operation}} p50",
- "range": true,
- "refId": "C"
}
],
- "title": "Latency",
+ "title": "Block download queue size",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Blocks Cache",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 60,
+ "panels": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 1013
+ },
+ "id": 61,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "",
+ "mode": "markdown"
},
+ "pluginVersion": "11.4.0-77765",
+ "title": "The gateway download bloom meta files and blocks from the object store.",
+ "transparent": true,
+ "type": "text"
+ },
+ {
"description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 22
+ "y": 1014
},
- "id": 62,
+ "id": 24,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### S3\n",
+ "content": "---\n#### GCS\n",
"mode": "markdown"
},
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -3998,6 +4962,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4028,7 +4993,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4040,9 +5006,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 22
+ "y": 1014
},
- "id": 63,
+ "id": 25,
"options": {
"legend": {
"calcs": [],
@@ -4056,6 +5022,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4063,7 +5030,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_gcs_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4091,8 +5058,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 25,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4110,7 +5078,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
"mode": "off"
@@ -4121,7 +5089,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4133,9 +5102,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 22
+ "y": 1014
},
- "id": 64,
+ "id": 29,
"options": {
"legend": {
"calcs": [],
@@ -4149,6 +5118,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4156,7 +5126,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4168,7 +5138,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4181,7 +5151,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4193,28 +5163,29 @@
"type": "timeseries"
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
"description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
"gridPos": {
"h": 7,
"w": 2,
"x": 0,
- "y": 29
+ "y": 1021
},
- "id": 65,
+ "id": 62,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
- "content": "---\n#### Azure\nBlob Storage\n\n",
+ "content": "---\n#### S3\n",
"mode": "markdown"
},
- "pluginVersion": "11.1.0-70005",
+ "pluginVersion": "11.4.0-77765",
+ "title": "",
"transparent": true,
"type": "text"
},
@@ -4236,6 +5207,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4266,7 +5238,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4278,9 +5251,9 @@
"h": 7,
"w": 11,
"x": 2,
- "y": 29
+ "y": 1021
},
- "id": 66,
+ "id": 63,
"options": {
"legend": {
"calcs": [],
@@ -4294,6 +5267,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4301,7 +5275,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval]))",
+ "expr": "sum by (status_code, operation) (rate(loki_s3_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
"instant": false,
"legendFormat": "{{operation}} {{status_code}}",
"range": true,
@@ -4329,6 +5303,7 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 25,
"gradientMode": "none",
@@ -4359,7 +5334,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4371,9 +5347,9 @@
"h": 7,
"w": 11,
"x": 13,
- "y": 29
+ "y": 1021
},
- "id": 67,
+ "id": 64,
"options": {
"legend": {
"calcs": [],
@@ -4387,6 +5363,7 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4394,7 +5371,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"instant": false,
"legendFormat": "{{operation}} p99",
"range": true,
@@ -4406,7 +5383,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p90",
@@ -4419,7 +5396,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} [$__rate_interval])))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_s3_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{operation}} p50",
@@ -4429,25 +5406,38 @@
],
"title": "Latency",
"type": "timeseries"
- }
- ],
- "title": "Object Store",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 22
- },
- "id": 77,
- "panels": [
+ },
+ {
+ "description": "",
+ "fieldConfig": {
+ "defaults": {},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 2,
+ "x": 0,
+ "y": 1028
+ },
+ "id": 65,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "---\n#### Azure\nBlob Storage\n\n",
+ "mode": "markdown"
+ },
+ "pluginVersion": "11.4.0-77765",
+ "title": "",
+ "transparent": true,
+ "type": "text"
+ },
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4462,8 +5452,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4472,9 +5463,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4484,7 +5472,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4495,24 +5483,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
"overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 2,
+ "y": 1028
},
- "id": 78,
+ "id": 66,
"options": {
"legend": {
"calcs": [],
@@ -4526,26 +5512,28 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "topk(3, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [5s])))",
- "legendFormat": "{{tasks}}",
- "queryType": "range",
+ "expr": "sum by (status_code, operation) (rate(loki_azure_blob_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval]))",
+ "instant": false,
+ "legendFormat": "{{operation}} {{status_code}}",
+ "range": true,
"refId": "A"
}
],
- "title": "Process tasks with bounds",
+ "title": "QPS",
"type": "timeseries"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"description": "",
"fieldConfig": {
@@ -4560,8 +5548,9 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 25,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -4570,9 +5559,6 @@
},
"insertNulls": false,
"lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
@@ -4582,7 +5568,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "none"
+ "mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@@ -4593,50 +5579,22 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
- }
+ },
+ "unit": "none"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "max",
- "avg"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 23
+ "h": 7,
+ "w": 11,
+ "x": 13,
+ "y": 1028
},
- "id": 79,
+ "id": 67,
"options": {
"legend": {
"calcs": [],
@@ -4650,91 +5608,80 @@
"sort": "none"
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "max(max_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
- "hide": false,
- "legendFormat": "max",
- "queryType": "range",
+ "expr": "histogram_quantile(0.99, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
+ "instant": false,
+ "legendFormat": "{{operation}} p99",
+ "range": true,
"refId": "A"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"fetch blocks\" | logfmt | unwrap duration(duration) [$__auto]))",
+ "expr": "histogram_quantile(0.90, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p90",
+ "range": true,
"refId": "B"
},
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "avg(avg_over_time({cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"} |= \"request unavailable blocks in the background\" | logfmt | missing > 0 | unwrap missing [$__auto]))",
+ "expr": "histogram_quantile(0.50, sum by (operation, le) (rate(loki_azure_blob_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"} [$__rate_interval])))",
"hide": false,
- "legendFormat": "avg missing",
- "queryType": "range",
+ "instant": false,
+ "legendFormat": "{{operation}} p50",
+ "range": true,
"refId": "C"
}
],
- "title": "Download enqueue duration",
+ "title": "Latency",
"type": "timeseries"
- },
+ }
+ ],
+ "title": "Object Store",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 77,
+ "panels": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"fieldConfig": {
"defaults": {
- "color": {
- "fixedColor": "green",
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "fillOpacity": 80,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "lineWidth": 1,
"scaleDistribution": {
"type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 100
- }
- ]
}
},
"overrides": []
@@ -4743,143 +5690,125 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 31
+ "y": 1044
},
"id": 80,
"options": {
- "barRadius": 0,
- "barWidth": 0.97,
- "fullHighlight": false,
- "groupWidth": 0.7,
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
"legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
- "orientation": "horizontal",
- "showValue": "auto",
- "stacking": "none",
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
},
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
+ }
},
- "pluginVersion": "11.0.0-67814",
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
- "type": "loki",
- "uid": "${loki_datasource}"
+ "type": "prometheus",
+ "uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sort_desc(topk(10, sum by (tasks) (count_over_time({namespace=\"loki-dev-006\", container=\"bloom-gateway\"} |= \"process tasks with bounds\" | logfmt [$__auto]))))",
- "legendFormat": "",
- "queryType": "instant",
+ "exemplar": false,
+ "expr": "increase(loki_bloom_gateway_dequeue_duration_seconds_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
"refId": "A"
}
],
- "title": "Tasks multiplexed",
- "type": "barchart"
+ "title": "Dequeue duration",
+ "type": "heatmap"
},
{
"datasource": {
"type": "prometheus",
"uid": "${datasource}"
},
- "description": "",
"fieldConfig": {
"defaults": {
- "color": {
- "mode": "palette-classic"
- },
"custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineStyle": {
- "fill": "solid"
- },
- "lineWidth": 1,
- "pointSize": 5,
"scaleDistribution": {
"type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
- }
- ]
}
},
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "Enqueue latency"
- },
- "properties": [
- {
- "id": "unit",
- "value": "s"
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 31
+ "y": 1044
},
- "id": 82,
+ "id": 106,
"options": {
+ "calculate": false,
+ "cellGap": 1,
+ "color": {
+ "exponent": 0.5,
+ "fill": "dark-orange",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "RdYlGn",
+ "steps": 64
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
"legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
},
"tooltip": {
- "maxHeight": 600,
"mode": "single",
- "sort": "none"
+ "showColorScale": false,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false
}
},
+ "pluginVersion": "11.4.0-77765",
"targets": [
{
"datasource": {
@@ -4887,45 +5816,35 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_enqueue_time_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
- "hide": false,
- "legendFormat": "Enqueue latency",
- "range": true,
- "refId": "A"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(loki_bloom_store_download_queue_size_bucket{cluster=\"$cluster\", namespace=\"$namespace\", container=\"bloom-gateway\"}[$__rate_interval])) by (le))",
+ "expr": "increase(loki_bloom_gateway_tasks_dequeued_bucket{cluster=\"$cluster\", job=\"$namespace/bloom-gateway\"}[$__rate_interval])",
+ "format": "heatmap",
"hide": false,
- "legendFormat": "Size",
+ "instant": false,
+ "legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
- "title": "Block download queue",
- "type": "timeseries"
+ "title": "Dequeue count",
+ "type": "heatmap"
}
],
"title": "Misc",
"type": "row"
}
],
- "refresh": "10s",
- "schemaVersion": 39,
+ "preload": false,
+ "refresh": "",
+ "schemaVersion": 40,
"tags": [],
"time": {
- "from": "now-6h",
+ "from": "now-1h",
"to": "now"
},
- "timeRangeUpdatedDuringEditOrView": false,
"timepicker": {},
"timezone": "utc",
- "title": "Bloom-Gateway",
+ "title": "Bloom Gateway",
"uid": "c495441a-1639-4ee2-9a32-42488dc5f81d",
- "version": 208,
+ "version": 272,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/production/loki-mixin/dashboards/loki-bloom-compactor.libsonnet b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet
similarity index 77%
rename from production/loki-mixin/dashboards/loki-bloom-compactor.libsonnet
rename to production/loki-mixin/dashboards/loki-bloom-build.libsonnet
index fc78becb27133..5405f6ef1b602 100644
--- a/production/loki-mixin/dashboards/loki-bloom-compactor.libsonnet
+++ b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet
@@ -1,13 +1,26 @@
-local raw = (import './dashboard-bloom-compactor.json');
+local raw = (import './dashboard-bloom-build.json');
+local template = import 'grafonnet/template.libsonnet';
// !--- HOW TO UPDATE THIS DASHBOARD ---!
// 1. Export the dashboard from Grafana as JSON
// !NOTE: Make sure you collapse all rows but the (first) Overview row.
-// 2. Copy the JSON into `dashboard-bloom-compactor.json`
+// 2. Copy the JSON into `dashboard-bloom-build.json`
// 3. Delete the `id` and `templating` fields from the JSON
(import 'dashboard-utils.libsonnet') {
+
+ local tenantTemplate =
+ template.new(
+ 'tenant',
+ '$datasource',
+ 'label_values(loki_bloomplanner_tenant_tasks_planned{cluster="$cluster", namespace="$namespace"}, tenant)',
+ label='Tenant',
+ sort=3, // numerical ascending
+ includeAll=true,
+ allValues='.+',
+ ),
+
grafanaDashboards+:: if !$._config.blooms.enabled then {} else {
- 'loki-bloom-compactor.json':
+ 'loki-bloom-build.json':
raw
{
local replaceClusterMatchers(expr) =
@@ -62,10 +75,13 @@ local raw = (import './dashboard-bloom-compactor.json');
for p in super.panels
],
}
- + $.dashboard('Loki / Bloom Compactor', uid='bloom-compactor')
+ + $.dashboard('Loki / Bloom Build', uid='bloom-build')
.addCluster()
.addNamespace()
.addLog()
- .addTag(),
+ .addTag()
+ + {
+ templating+: { list+: [tenantTemplate] },
+ },
},
}
diff --git a/production/loki-mixin/dashboards/loki-retention.libsonnet b/production/loki-mixin/dashboards/loki-retention.libsonnet
index 2a1c4777a2930..faee87a52ccc6 100644
--- a/production/loki-mixin/dashboards/loki-retention.libsonnet
+++ b/production/loki-mixin/dashboards/loki-retention.libsonnet
@@ -40,7 +40,7 @@
$.row('')
.addPanel(
$.newQueryPanel('Number of times Tables were skipped during Compaction') +
- $.queryPanel(['sum(increase(loki_compactor_skipped_compacting_locked_table_total{%s}[$__range]))' % $.namespaceMatcher()], ['{{table_name}}']),
+ $.queryPanel(['sum(loki_compactor_locked_table_successive_compaction_skips{%s})' % $.namespaceMatcher()], ['{{table_name}}']),
)
.addPanel(
$.newQueryPanel('Compact Tables Operations Per Status') +
diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go
index 1bfcc57bc8965..776a0dee1afa7 100644
--- a/tools/doc-generator/parse/root_blocks.go
+++ b/tools/doc-generator/parse/root_blocks.go
@@ -30,9 +30,11 @@ import (
"github.com/grafana/loki/v3/pkg/querier/queryrange"
querier_worker "github.com/grafana/loki/v3/pkg/querier/worker"
"github.com/grafana/loki/v3/pkg/ruler"
+ "github.com/grafana/loki/v3/pkg/ruler/rulestore"
"github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/loki/v3/pkg/scheduler"
"github.com/grafana/loki/v3/pkg/storage"
+ "github.com/grafana/loki/v3/pkg/storage/bucket/gcs"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/alibaba"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/aws"
@@ -280,12 +282,14 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type
Desc: `Configures additional object stores for a given storage provider.
Supported stores: aws, azure, bos, filesystem, gcs, swift.
Example:
-storage_config:
- named_stores:
- aws:
- store-1:
- endpoint: s3://foo-bucket
- region: us-west1
+` + "```yaml" + `
+ storage_config:
+ named_stores:
+ aws:
+ store-1:
+ endpoint: s3://foo-bucket
+ region: us-west1
+` + "```" + `
Named store from this example can be used by setting object_store to store-1 in period_config.`,
},
{
@@ -293,6 +297,18 @@ Named store from this example can be used by setting object_store to store-1 in
StructType: []reflect.Type{reflect.TypeOf(push.AttributesConfig{})},
Desc: "Define actions for matching OpenTelemetry (OTEL) attributes.",
},
+ {
+ Name: "gcs_storage_backend",
+ StructType: []reflect.Type{reflect.TypeOf(gcs.Config{})},
+ Desc: "The gcs_storage_backend block configures the connection to Google Cloud Storage object storage backend.",
+ },
+ {
+ Name: "ruler_storage_config",
+ StructType: []reflect.Type{reflect.TypeOf(rulestore.Config{})},
+ Desc: `The ruler_storage_config configures ruler storage backend.
+It uses thanos-io/objstore clients for connecting to object storage backends. This will become the default way of configuring object store clients in future releases.
+Currently this is opt-in and takes effect only when ` + "`-use-thanos-objstore` " + "is set to true.",
+ },
}
)
diff --git a/tools/lambda-promtail/lambda-promtail/kinesis.go b/tools/lambda-promtail/lambda-promtail/kinesis.go
index 2704c59af1495..5c14971b9bfd4 100644
--- a/tools/lambda-promtail/lambda-promtail/kinesis.go
+++ b/tools/lambda-promtail/lambda-promtail/kinesis.go
@@ -69,7 +69,7 @@ func isGzipped(data []byte) bool {
return len(data) >= 2 && data[0] == 0x1F && data[1] == 0x8B
}
-// unzipData decompress the gzipped data
+// ungzipData decompress the gzipped data
func ungzipData(data []byte) ([]byte, error) {
reader, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
diff --git a/tools/lambda-promtail/sqs.tf b/tools/lambda-promtail/sqs.tf
index 7080eaab4c4ab..bdc03fa159f0e 100644
--- a/tools/lambda-promtail/sqs.tf
+++ b/tools/lambda-promtail/sqs.tf
@@ -32,6 +32,10 @@ data "aws_iam_policy_document" "queue_policy" {
"sqs:SendMessage"
]
resources = ["arn:aws:sqs:*:*:${var.sqs_queue_name_prefix}-main-queue"]
+ principals {
+ type = "Service"
+ identifiers = ["s3.amazonaws.com"]
+ }
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
diff --git a/tools/tsdb/index-analyzer/main.go b/tools/tsdb/index-analyzer/main.go
index 2d19ad9c3c421..d0e0f710a6fb2 100644
--- a/tools/tsdb/index-analyzer/main.go
+++ b/tools/tsdb/index-analyzer/main.go
@@ -24,7 +24,7 @@ func main() {
periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs)
helpers.ExitErr("find period config for bucket", err)
- objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics)
+ objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, "index-analyzer", conf.StorageConfig, clientMetrics)
helpers.ExitErr("creating object client", err)
shipper, err := indexshipper.NewIndexShipper(
diff --git a/tools/tsdb/migrate-versions/main.go b/tools/tsdb/migrate-versions/main.go
index d3853442b6e86..5aa0848abfee6 100644
--- a/tools/tsdb/migrate-versions/main.go
+++ b/tools/tsdb/migrate-versions/main.go
@@ -98,7 +98,7 @@ func main() {
}
func migrateTables(pCfg config.PeriodConfig, storageCfg storage.Config, clientMetrics storage.ClientMetrics, tableRange config.TableRange) error {
- objClient, err := storage.NewObjectClient(pCfg.ObjectType, storageCfg, clientMetrics)
+ objClient, err := storage.NewObjectClient(pCfg.ObjectType, "tsdb-migrate", storageCfg, clientMetrics)
if err != nil {
return err
}
diff --git a/tools/tsdb/migrate-versions/main_test.go b/tools/tsdb/migrate-versions/main_test.go
index 62519e04f61fc..7b211864292f8 100644
--- a/tools/tsdb/migrate-versions/main_test.go
+++ b/tools/tsdb/migrate-versions/main_test.go
@@ -52,7 +52,7 @@ func TestMigrateTables(t *testing.T) {
}
clientMetrics := storage.NewClientMetrics()
- objClient, err := storage.NewObjectClient(pcfg.ObjectType, storageCfg, clientMetrics)
+ objClient, err := storage.NewObjectClient(pcfg.ObjectType, "test", storageCfg, clientMetrics)
require.NoError(t, err)
indexStorageClient := shipperstorage.NewIndexStorageClient(objClient, pcfg.IndexTables.PathPrefix)
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
index f631b6df06d13..0bbe9ed7736c4 100644
--- a/vendor/cel.dev/expr/BUILD.bazel
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -1,3 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//proto/cel/expr:google_rpc_status_go_proto",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
index 841b543576d16..39ed1f94745e7 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
@@ -1,18 +1,18 @@
{
"ai": "0.8.2",
"aiplatform": "1.68.0",
- "auth": "0.8.1",
+ "auth": "0.9.7",
"auth/oauth2adapt": "0.2.4",
- "bigquery": "1.62.0",
- "bigtable": "1.29.0",
- "datastore": "1.17.1",
+ "bigquery": "1.63.1",
+ "bigtable": "1.33.0",
+ "datastore": "1.19.0",
"errorreporting": "0.3.1",
- "firestore": "1.16.0",
+ "firestore": "1.17.0",
"logging": "1.11.0",
"profiler": "0.4.1",
- "pubsub": "1.41.0",
+ "pubsub": "1.44.0",
"pubsublite": "1.8.2",
- "spanner": "1.66.0",
- "storage": "1.43.0",
- "vertexai": "0.12.0"
+ "spanner": "1.69.0",
+ "storage": "1.44.0",
+ "vertexai": "0.13.1"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
index 8ac2f38f99937..edbdcf47fd969 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -1,149 +1,150 @@
{
- "accessapproval": "1.7.12",
- "accesscontextmanager": "1.8.12",
- "advisorynotifications": "1.4.6",
- "alloydb": "1.10.7",
- "analytics": "0.24.0",
- "apigateway": "1.6.12",
- "apigeeconnect": "1.6.12",
- "apigeeregistry": "0.8.10",
- "apikeys": "1.1.12",
- "appengine": "1.8.12",
- "apphub": "0.1.6",
- "apps": "0.4.7",
- "area120": "0.8.12",
- "artifactregistry": "1.14.14",
- "asset": "1.19.6",
- "assuredworkloads": "1.11.12",
- "automl": "1.13.12",
- "backupdr": "1.0.4",
- "baremetalsolution": "1.2.11",
- "batch": "1.9.3",
- "beyondcorp": "1.0.11",
- "billing": "1.18.10",
- "binaryauthorization": "1.8.8",
- "certificatemanager": "1.8.6",
- "channel": "1.17.12",
- "chat": "0.3.1",
- "cloudbuild": "1.16.6",
- "cloudcontrolspartner": "1.0.4",
- "clouddms": "1.7.11",
- "cloudprofiler": "0.3.6",
- "cloudquotas": "1.0.4",
- "cloudtasks": "1.12.13",
- "commerce": "1.0.5",
- "compute": "1.27.5",
- "compute/metadata": "0.5.0",
- "confidentialcomputing": "1.6.1",
- "config": "1.0.5",
- "contactcenterinsights": "1.13.7",
- "container": "1.38.1",
- "containeranalysis": "0.12.2",
- "datacatalog": "1.21.1",
- "dataflow": "0.9.12",
- "dataform": "0.9.9",
- "datafusion": "1.7.12",
- "datalabeling": "0.8.12",
- "dataplex": "1.18.3",
- "dataproc": "2.5.4",
- "dataqna": "0.8.12",
- "datastream": "1.10.11",
- "deploy": "1.21.1",
- "developerconnect": "0.1.4",
- "dialogflow": "1.56.0",
- "discoveryengine": "1.12.0",
- "dlp": "1.17.0",
- "documentai": "1.32.0",
- "domains": "0.9.12",
- "edgecontainer": "1.2.6",
- "edgenetwork": "1.1.3",
- "essentialcontacts": "1.6.13",
- "eventarc": "1.13.11",
- "filestore": "1.8.8",
- "functions": "1.17.0",
- "gkebackup": "1.5.5",
- "gkeconnect": "0.8.12",
- "gkehub": "0.14.12",
- "gkemulticloud": "1.2.5",
- "grafeas": "0.3.10",
- "gsuiteaddons": "1.6.12",
- "iam": "1.1.13",
- "iap": "1.9.11",
- "identitytoolkit": "0.1.4",
- "ids": "1.4.12",
- "iot": "1.7.12",
- "kms": "1.18.5",
- "language": "1.13.1",
- "lifesciences": "0.9.12",
- "longrunning": "0.5.12",
- "managedidentities": "1.6.12",
- "managedkafka": "0.1.6",
- "maps": "1.11.7",
- "mediatranslation": "0.8.12",
- "memcache": "1.10.12",
- "metastore": "1.13.11",
- "migrationcenter": "1.0.5",
- "monitoring": "1.20.4",
- "netapp": "1.2.1",
- "networkconnectivity": "1.14.11",
- "networkmanagement": "1.13.7",
- "networksecurity": "0.9.12",
- "networkservices": "0.1.6",
- "notebooks": "1.11.10",
- "optimization": "1.6.10",
- "orchestration": "1.9.7",
- "orgpolicy": "1.12.8",
- "osconfig": "1.13.3",
- "oslogin": "1.13.8",
- "parallelstore": "0.5.1",
- "phishingprotection": "0.8.12",
- "policysimulator": "0.2.10",
- "policytroubleshooter": "1.10.10",
- "privatecatalog": "0.9.12",
- "privilegedaccessmanager": "0.1.1",
- "rapidmigrationassessment": "1.0.12",
- "recaptchaenterprise": "2.14.3",
- "recommendationengine": "0.8.12",
- "recommender": "1.12.8",
- "redis": "1.16.5",
- "resourcemanager": "1.9.12",
- "resourcesettings": "1.7.5",
- "retail": "1.17.5",
- "run": "1.4.1",
- "scheduler": "1.10.13",
- "secretmanager": "1.13.6",
- "securesourcemanager": "1.1.1",
- "security": "1.17.5",
- "securitycenter": "1.34.0",
- "securitycentermanagement": "1.0.4",
- "securityposture": "0.1.8",
- "servicecontrol": "1.13.7",
- "servicedirectory": "1.11.12",
- "servicehealth": "1.0.5",
- "servicemanagement": "1.9.13",
- "serviceusage": "1.8.11",
- "shell": "1.7.12",
- "shopping": "0.8.7",
- "speech": "1.24.1",
- "storageinsights": "1.0.12",
- "storagetransfer": "1.10.11",
- "streetview": "0.1.5",
- "support": "1.0.11",
- "talent": "1.6.13",
- "telcoautomation": "1.0.4",
- "texttospeech": "1.7.12",
- "tpu": "1.6.12",
- "trace": "1.10.12",
- "translate": "1.11.0",
- "video": "1.22.1",
- "videointelligence": "1.11.12",
- "vision": "2.8.7",
- "visionai": "0.2.5",
- "vmmigration": "1.7.12",
- "vmwareengine": "1.2.1",
- "vpcaccess": "1.7.12",
- "webrisk": "1.9.12",
- "websecurityscanner": "1.6.12",
- "workflows": "1.12.11",
- "workstations": "1.0.5"
+ "accessapproval": "1.8.1",
+ "accesscontextmanager": "1.9.1",
+ "advisorynotifications": "1.5.1",
+ "alloydb": "1.12.1",
+ "analytics": "0.25.1",
+ "apigateway": "1.7.1",
+ "apigeeconnect": "1.7.1",
+ "apigeeregistry": "0.9.1",
+ "apihub": "0.1.1",
+ "apikeys": "1.2.1",
+ "appengine": "1.9.1",
+ "apphub": "0.2.1",
+ "apps": "0.5.1",
+ "area120": "0.9.1",
+ "artifactregistry": "1.15.1",
+ "asset": "1.20.2",
+ "assuredworkloads": "1.12.1",
+ "automl": "1.14.1",
+ "backupdr": "1.1.1",
+ "baremetalsolution": "1.3.1",
+ "batch": "1.11.0",
+ "beyondcorp": "1.1.1",
+ "billing": "1.19.1",
+ "binaryauthorization": "1.9.1",
+ "certificatemanager": "1.9.1",
+ "channel": "1.18.1",
+ "chat": "0.6.0",
+ "cloudbuild": "1.18.0",
+ "cloudcontrolspartner": "1.2.0",
+ "clouddms": "1.8.1",
+ "cloudprofiler": "0.4.1",
+ "cloudquotas": "1.1.1",
+ "cloudtasks": "1.13.1",
+ "commerce": "1.1.1",
+ "compute": "1.28.1",
+ "compute/metadata": "0.5.2",
+ "confidentialcomputing": "1.7.1",
+ "config": "1.1.1",
+ "contactcenterinsights": "1.14.1",
+ "container": "1.40.0",
+ "containeranalysis": "0.13.1",
+ "datacatalog": "1.22.1",
+ "dataflow": "0.10.1",
+ "dataform": "0.10.1",
+ "datafusion": "1.8.1",
+ "datalabeling": "0.9.1",
+ "dataplex": "1.19.1",
+ "dataproc": "2.9.0",
+ "dataqna": "0.9.1",
+ "datastream": "1.11.1",
+ "deploy": "1.22.1",
+ "developerconnect": "0.2.1",
+ "dialogflow": "1.58.0",
+ "discoveryengine": "1.14.0",
+ "dlp": "1.19.0",
+ "documentai": "1.34.0",
+ "domains": "0.10.1",
+ "edgecontainer": "1.3.1",
+ "edgenetwork": "1.2.1",
+ "essentialcontacts": "1.7.1",
+ "eventarc": "1.14.1",
+ "filestore": "1.9.1",
+ "functions": "1.19.1",
+ "gkebackup": "1.6.1",
+ "gkeconnect": "0.11.1",
+ "gkehub": "0.15.1",
+ "gkemulticloud": "1.4.0",
+ "grafeas": "0.3.11",
+ "gsuiteaddons": "1.7.1",
+ "iam": "1.2.1",
+ "iap": "1.10.1",
+ "identitytoolkit": "0.2.1",
+ "ids": "1.5.1",
+ "iot": "1.8.1",
+ "kms": "1.20.0",
+ "language": "1.14.1",
+ "lifesciences": "0.10.1",
+ "longrunning": "0.6.1",
+ "managedidentities": "1.7.1",
+ "managedkafka": "0.2.1",
+ "maps": "1.14.0",
+ "mediatranslation": "0.9.1",
+ "memcache": "1.11.1",
+ "metastore": "1.14.1",
+ "migrationcenter": "1.1.1",
+ "monitoring": "1.21.1",
+ "netapp": "1.4.0",
+ "networkconnectivity": "1.15.1",
+ "networkmanagement": "1.14.1",
+ "networksecurity": "0.10.1",
+ "networkservices": "0.2.1",
+ "notebooks": "1.12.1",
+ "optimization": "1.7.1",
+ "orchestration": "1.11.0",
+ "orgpolicy": "1.14.0",
+ "osconfig": "1.14.1",
+ "oslogin": "1.14.1",
+ "parallelstore": "0.6.1",
+ "phishingprotection": "0.9.1",
+ "policysimulator": "0.3.1",
+ "policytroubleshooter": "1.11.1",
+ "privatecatalog": "0.10.1",
+ "privilegedaccessmanager": "0.2.1",
+ "rapidmigrationassessment": "1.1.1",
+ "recaptchaenterprise": "2.17.1",
+ "recommendationengine": "0.9.1",
+ "recommender": "1.13.1",
+ "redis": "1.17.1",
+ "resourcemanager": "1.10.1",
+ "resourcesettings": "1.8.1",
+ "retail": "1.18.1",
+ "run": "1.5.1",
+ "scheduler": "1.11.1",
+ "secretmanager": "1.14.1",
+ "securesourcemanager": "1.2.1",
+ "security": "1.18.1",
+ "securitycenter": "1.35.1",
+ "securitycentermanagement": "1.1.1",
+ "securityposture": "0.2.1",
+ "servicecontrol": "1.14.1",
+ "servicedirectory": "1.12.1",
+ "servicehealth": "1.1.1",
+ "servicemanagement": "1.10.1",
+ "serviceusage": "1.9.1",
+ "shell": "1.8.1",
+ "shopping": "0.10.0",
+ "speech": "1.25.1",
+ "storageinsights": "1.1.1",
+ "storagetransfer": "1.11.1",
+ "streetview": "0.2.1",
+ "support": "1.1.1",
+ "talent": "1.7.1",
+ "telcoautomation": "1.1.1",
+ "texttospeech": "1.8.1",
+ "tpu": "1.7.1",
+ "trace": "1.11.1",
+ "translate": "1.12.1",
+ "video": "1.23.1",
+ "videointelligence": "1.12.1",
+ "vision": "2.9.1",
+ "visionai": "0.4.1",
+ "vmmigration": "1.8.1",
+ "vmwareengine": "1.3.1",
+ "vpcaccess": "1.8.1",
+ "webrisk": "1.10.1",
+ "websecurityscanner": "1.7.1",
+ "workflows": "1.13.1",
+ "workstations": "1.1.1"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
index 7b1015e63fc8b..c8f1da56d86db 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.115.1"
+ ".": "0.116.0"
}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index d48e0cfba4798..adc725ca1a72d 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09)
+
+
+### Features
+
+* **genai:** Add tokenizer package ([#10699](https://github.com/googleapis/google-cloud-go/issues/10699)) ([214af16](https://github.com/googleapis/google-cloud-go/commit/214af1604bf3837f68e96dbf81c1331b90c9375f))
+
## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13)
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
index 99514979018e0..63db0209c7dbe 100644
--- a/vendor/cloud.google.com/go/README.md
+++ b/vendor/cloud.google.com/go/README.md
@@ -28,12 +28,16 @@ For an updated list of all of our released APIs please see our
## [Go Versions Supported](#supported-versions)
+**Note:** As of Jan 1, 2025 the Cloud Client Libraries for Go will support the
+two most-recent major Go releases -- the same [policy](https://go.dev/doc/devel/release#policy)
+the Go programming language follows.
+
Our libraries are compatible with at least the three most recent, major Go
releases. They are currently compatible with:
+- Go 1.23
- Go 1.22
- Go 1.21
-- Go 1.20
## Authorization
@@ -56,14 +60,14 @@ client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfil
```
You can exert more control over authorization by using the
-[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
-create an `oauth2.TokenSource`. Then pass
-[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
+[credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) package to
+create an [auth.Credentials](https://pkg.go.dev/cloud.google.com/go/auth#Credentials).
+Then pass [`option.WithAuthCredentials`](https://pkg.go.dev/google.golang.org/api/option#WithAuthCredentials)
to the `NewClient` function:
```go
-tokenSource := ...
-client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+creds := ...
+client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds))
```
## Contributing
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 8042bdae809b6..c81df7392784a 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,70 @@
# Changelog
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
index 36de276a0743e..6fe4f0763e318 100644
--- a/vendor/cloud.google.com/go/auth/README.md
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -1,4 +1,40 @@
-# auth
+# Google Auth Library for Go
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
+[](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+ [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+ package.
+- To create a authenticated HTTP client please see examples in the
+ [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+ package.
+- To create a authenticated gRPC connection please see examples in the
+ [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+ package.
+- To create an ID token please see examples in the
+ [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+ package.
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index 41e03f293546e..314bd292e3f5e 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
package auth
import (
@@ -130,7 +135,9 @@ func (t *Token) isEmpty() bool {
}
// Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
type Credentials struct {
json []byte
projectID CredentialsPropertyProvider
@@ -258,7 +265,7 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
}
func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
- if ctpo == nil {
+ if ctpo == nil || ctpo.ExpireEarly == 0 {
return defaultExpiryDelta
}
return ctpo.ExpireEarly
@@ -321,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err
defer c.mu.Unlock()
return c.cachedToken, nil
case stale:
- c.tokenAsync(ctx)
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
// Return the stale token immediately to not block customer requests to Cloud services.
c.mu.Lock()
defer c.mu.Unlock()
@@ -336,13 +345,14 @@ func (c *cachedTokenProvider) tokenState() tokenState {
c.mu.Lock()
defer c.mu.Unlock()
t := c.cachedToken
+ now := timeNow()
if t == nil || t.Value == "" {
return invalid
} else if t.Expiry.IsZero() {
return fresh
- } else if timeNow().After(t.Expiry.Round(0)) {
+ } else if now.After(t.Expiry.Round(0)) {
return invalid
- } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
return stale
}
return fresh
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
index cce6224186a73..010afc37c8fe4 100644
--- a/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -98,8 +98,8 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if OnGCE() {
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: computeTokenProvider(opts),
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
- return metadata.ProjectID()
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return metadata.ProjectIDWithContext(ctx)
}),
UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
}), nil
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index b426e16d29757..6591b181132f7 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
- var projectID, quotaProjectID, universeDomain string
+ var projectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
case credsfile.ServiceAccountKey:
@@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ExternalAccountKey:
f, err := credsfile.ParseExternalAccount(b)
@@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.ExternalAccountAuthorizedUserKey:
f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
@@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ImpersonatedServiceAccountKey:
f, err := credsfile.ParseImpersonatedServiceAccount(b)
@@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
}),
- JSON: b,
- ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
- QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
+ JSON: b,
+ ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
+ // TODO(codyoss): only set quota project here if there was a user override
UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
}), nil
}
@@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string
}
func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
if opts.UseSelfSignedJWT {
return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
Email: f.ClientEmail,
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
index a34f6b06f8460..d8b5d4fdeb9e0 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -94,32 +94,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
if err != nil {
return "", err
}
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
index b62a8ae4d5d70..6ae29de6c2789 100644
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -17,6 +17,7 @@ package credentials
import (
"context"
"crypto/rsa"
+ "errors"
"fmt"
"strings"
"time"
@@ -35,6 +36,9 @@ var (
// configureSelfSignedJWT uses the private key in the service account to create
// a JWT without making a network call.
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
pk, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
index efc91c2b0c355..8696df1487fc6 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -22,7 +22,7 @@ import (
"strings"
"cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
+ "cloud.google.com/go/auth/internal/compute"
"google.golang.org/grpc"
grpcgoogle "google.golang.org/grpc/credentials/google"
)
@@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool {
return true
}
-func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
+func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool {
if tp == nil {
return false
}
@@ -66,6 +66,9 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool
if tok == nil {
return false
}
+ if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+ return true
+ }
if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
return false
}
@@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool {
// configuration allows the use of direct path. If it does not the provided
// grpcOpts and endpoint are returned.
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
- if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
+ if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
grpcOpts = []grpc.DialOption{
grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 0442a5938a800..42d4cbe3062ed 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package grpctransport provides functionality for managing gRPC client
+// connections to Google Cloud services.
package grpctransport
import (
@@ -20,15 +22,19 @@ import (
"errors"
"fmt"
"net/http"
+ "os"
+ "sync"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"go.opencensus.io/plugin/ocgrpc"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/stats"
)
const (
@@ -38,7 +44,7 @@ const (
// Check env to decide if using google-c2p resolver for DirectPath traffic.
enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
var (
@@ -46,6 +52,27 @@ var (
timeoutDialerOption grpc.DialOption
)
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: When this module depends on a version of otelgrpc containing the fix,
+// replace this singleton with inline usage for simplicity.
+// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
// ClientCertProvider is a function that returns a TLS client certificate to be
// used when opening TLS connections. It follows the same semantics as
// [crypto/tls.Config.GetClientCertificate].
@@ -271,7 +298,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
if metadata == nil {
metadata = make(map[string]string, 1)
}
- metadata[quotaProjectHeaderKey] = qp
+ // Don't overwrite user specified quota
+ if _, ok := metadata[quotaProjectHeaderKey]; !ok {
+ metadata[quotaProjectHeaderKey] = qp
+ }
}
grpcOpts = append(grpcOpts,
grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
@@ -289,9 +319,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
grpcOpts = addOCStatsHandler(grpcOpts, opts)
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
- return grpc.DialContext(ctx, endpoint, grpcOpts...)
+ return grpc.NewClient(endpoint, grpcOpts...)
}
// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
@@ -325,15 +356,23 @@ type grpcCredentialsProvider struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com". This is the universe domain
-// configured for the client, which will be compared to the universe domain
-// that is separately configured for the credentials.
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
- if c.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if c.clientUniverseDomain != "" {
+ return c.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return c.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
@@ -384,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt
}
return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}
+
+func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+ if opts.DisableTelemetry {
+ return dialOpts
+ }
+ return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 969c8d4d2008c..30fedf9562f96 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package httptransport provides functionality for managing HTTP client
+// connections to Google Cloud services.
package httptransport
import (
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index 07eea474446b3..63498ee792be9 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -19,6 +19,7 @@ import (
"crypto/tls"
"net"
"net/http"
+ "os"
"time"
"cloud.google.com/go/auth"
@@ -27,11 +28,12 @@ import (
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
"go.opencensus.io/plugin/ochttp"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
const (
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
@@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers: headers,
}
var trans http.RoundTripper = ht
+ // Give OpenTelemetry precedence over OpenCensus in case user configuration
+ // causes both to write the same header (`X-Cloud-Trace-Context`).
+ trans = addOpenTelemetryTransport(trans, opts)
trans = addOCTransport(trans, opts)
switch {
case opts.DisableAuthentication:
@@ -76,7 +81,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
if headers == nil {
headers = make(map[string][]string, 1)
}
- headers.Set(quotaProjectHeaderKey, qp)
+ // Don't overwrite user specified quota
+ if v := headers.Get(quotaProjectHeaderKey); v == "" {
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
}
creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
trans = &authTransport{
@@ -94,7 +102,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
// http.DefaultTransport.
// If TLSCertificate is available, set TLSClientConfig as well.
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
- trans := http.DefaultTransport.(*http.Transport).Clone()
+ defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+ if !ok {
+ defaultTransport = transport.BaseTransport()
+ }
+ trans := defaultTransport.Clone()
trans.MaxIdleConnsPerHost = 100
if clientCertSource != nil {
@@ -155,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return rt.RoundTrip(&newReq)
}
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+ if opts.DisableTelemetry {
+ return trans
+ }
+ return otelhttp.NewTransport(trans)
+}
+
func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
@@ -171,13 +190,23 @@ type authTransport struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return t.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
// RoundTrip authorizes and authenticates the request with an
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
new file mode 100644
index 0000000000000..651bd61fbbc24
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -0,0 +1,66 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+)
+
+// OnComputeEngine returns whether the client is running on GCE.
+//
+// This is a copy of the gRPC internal googlecloud.OnGCE() func at:
+// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
+// The functionality is similar to the metadata.OnGCE() func at:
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go
+//
+// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server.
+// In particular, OnComputeEngine() will return false on Serverless.
+func OnComputeEngine() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+ name := string(manufacturer)
+ switch goos {
+ case "linux":
+ name = strings.TrimSpace(name)
+ return name == "Google" || name == "Google Compute Engine"
+ case "windows":
+ name = strings.Replace(name, " ", "", -1)
+ name = strings.Replace(name, "\n", "", -1)
+ name = strings.Replace(name, "\r", "", -1)
+ return name == "Google"
+ default:
+ return false
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
new file mode 100644
index 0000000000000..af490bf4f490c
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
@@ -0,0 +1,22 @@
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+func manufacturer() ([]byte, error) {
+ return nil, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
new file mode 100644
index 0000000000000..d92178df86c27
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import "os"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+ return os.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 0000000000000..16be9df3064bd
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "errors"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ windowsCheckCommand = "powershell.exe"
+ windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+ powershellOutputFilter = "Manufacturer"
+ windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+ cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+ out, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+ if strings.HasPrefix(line, powershellOutputFilter) {
+ re := regexp.MustCompile(windowsManufacturerRegex)
+ name := re.FindString(line)
+ name = strings.TrimLeft(name, ":")
+ return []byte(name), nil
+ }
+ }
+ return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 4308345eda335..66a51f19c73e4 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -38,8 +38,11 @@ const (
// QuotaProjectEnvVar is the environment variable for setting the quota
// project.
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index 26e037c1a374b..f606888f12048 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -133,7 +133,11 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
- return defaultTransportCreds, config.endpoint, nil
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return defaultTransportCreds, config.endpoint, nil
+ }
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
@@ -177,7 +181,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
- return config.clientCertSource, nil, nil
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
index 3227aba280c8e..738cb21618e79 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) {
file, err := os.ReadFile(configFilePath)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- // Config file missing means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- return nil, err
+ // Config file missing means Secure Connect is not supported.
+ // There are non-os.ErrNotExist errors that may be returned.
+ // (e.g. if the home directory is /dev/null, *nix systems will
+ // return ENOTDIR instead of ENOENT)
+ return nil, errSourceUnavailable
}
var metadata secureConnectMetadata
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
index 4df73edce986e..37894bfcd013b 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -15,6 +15,7 @@
package transport
import (
+ "context"
"encoding/json"
"fmt"
"log"
@@ -84,7 +85,7 @@ func getMetadataMTLSAutoConfig() {
}
var httpGetMetadataMTLSConfig = func() (string, error) {
- return metadata.Get(configEndpointSuffix)
+ return metadata.GetWithContext(context.Background(), configEndpointSuffix)
}
func queryConfig() (*mtlsConfig, error) {
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
index 718a6b1714586..cc586ec5b1a5d 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -81,12 +81,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri
// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
- trans := baseTransport()
+ trans := BaseTransport()
trans.TLSClientConfig = tlsConfig
return &http.Client{Transport: trans}
}
-func baseTransport() *http.Transport {
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
+func BaseTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
diff --git a/vendor/cloud.google.com/go/bigtable/CHANGES.md b/vendor/cloud.google.com/go/bigtable/CHANGES.md
index 05b4291c9fd32..2c6917bbe2831 100644
--- a/vendor/cloud.google.com/go/bigtable/CHANGES.md
+++ b/vendor/cloud.google.com/go/bigtable/CHANGES.md
@@ -1,5 +1,60 @@
# Changes
+## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.32.0...bigtable/v1.33.0) (2024-09-23)
+
+
+### Features
+
+* **bigtable/admin:** Add support for Cloud Bigtable Row Affinity in App Profiles ([b9dfce5](https://github.com/googleapis/google-cloud-go/commit/b9dfce5e509d0c795e89c66b7f6a6bb356e3a172))
+
+
+### Bug Fixes
+
+* **bigtable:** Rollback new auth library ([#10906](https://github.com/googleapis/google-cloud-go/issues/10906)) ([8109157](https://github.com/googleapis/google-cloud-go/commit/8109157cb2bfb700fde04361e0fa7c1345608fce))
+
+## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.31.0...bigtable/v1.32.0) (2024-09-09)
+
+
+### Features
+
+* **bigtable:** Add "TypeUnspecified" to represent an unspecified type ([#10820](https://github.com/googleapis/google-cloud-go/issues/10820)) ([8d008de](https://github.com/googleapis/google-cloud-go/commit/8d008def4378d33ab66ca0ec346534be87155576))
+* **bigtable:** Add client side metrics to feature flag ([#10678](https://github.com/googleapis/google-cloud-go/issues/10678)) ([02b2d12](https://github.com/googleapis/google-cloud-go/commit/02b2d12d51f774ea9ce6985b3f03006ef3d23e50))
+* **bigtable:** Add update value type test. ([#10771](https://github.com/googleapis/google-cloud-go/issues/10771)) ([210f022](https://github.com/googleapis/google-cloud-go/commit/210f0228e68452c23cbf6bf42862974303f54450))
+* **bigtable:** Wrapping errors on Export ([#10836](https://github.com/googleapis/google-cloud-go/issues/10836)) ([fc6d6a8](https://github.com/googleapis/google-cloud-go/commit/fc6d6a8b6bb90714e92bfb09762cc5a99930a6a8))
+
+
+### Bug Fixes
+
+* **bigtable:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **bigtable:** Error logging for client side metrics ([#10658](https://github.com/googleapis/google-cloud-go/issues/10658)) ([9a94ff8](https://github.com/googleapis/google-cloud-go/commit/9a94ff87b83f37472aa94b6e0d1cc69bbb83c3bc))
+
+## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.30.0...bigtable/v1.31.0) (2024-08-28)
+
+
+### Features
+
+* **bigtable:** Using new auth library ([#10766](https://github.com/googleapis/google-cloud-go/issues/10766)) ([8daf304](https://github.com/googleapis/google-cloud-go/commit/8daf304faf3808690996ad3a45d2890b107a0939))
+
+
+### Performance Improvements
+
+* **bigtable:** Use RecvMsg instead of Recv ([#10674](https://github.com/googleapis/google-cloud-go/issues/10674)) ([7e4fbc5](https://github.com/googleapis/google-cloud-go/commit/7e4fbc5612441c59bfaa1e5b9bbd06e3387b5c02))
+
+## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.29.0...bigtable/v1.30.0) (2024-08-26)
+
+
+### Features
+
+* **bigtable:** Add MarshalJSON to allow clients to get a stringified version of the protobuf ([#10679](https://github.com/googleapis/google-cloud-go/issues/10679)) ([663f399](https://github.com/googleapis/google-cloud-go/commit/663f3996ced66c312c8202535574d3ffcb72d283))
+* **bigtable:** Add nil checks to Equal method ([#10758](https://github.com/googleapis/google-cloud-go/issues/10758)) ([f1aad7f](https://github.com/googleapis/google-cloud-go/commit/f1aad7f3a05a959d0dd973e026026391deda7657))
+* **bigtable:** Add UpdateFamily to allow updating a family type ([#10759](https://github.com/googleapis/google-cloud-go/issues/10759)) ([ec0cbb2](https://github.com/googleapis/google-cloud-go/commit/ec0cbb20ba42b7ef03688a06dc0a380e9b27e394))
+* **bigtable:** Update go version for conformance tests ([#10743](https://github.com/googleapis/google-cloud-go/issues/10743)) ([74cf45e](https://github.com/googleapis/google-cloud-go/commit/74cf45efe7dc6f74cadac3f015a705f8dbf69622))
+
+
+### Bug Fixes
+
+* **bigtable:** Use new auth library ([#10670](https://github.com/googleapis/google-cloud-go/issues/10670)) ([fab520d](https://github.com/googleapis/google-cloud-go/commit/fab520d226340bbf1aedc001dcb7384651e075a0))
+
## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.28.0...bigtable/v1.29.0) (2024-08-09)
diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go
index 8dd9f929056fa..69f57f0efa7b8 100644
--- a/vendor/cloud.google.com/go/bigtable/admin.go
+++ b/vendor/cloud.google.com/go/bigtable/admin.go
@@ -667,41 +667,82 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo,
return ti, nil
}
-type gcPolicySettings struct {
+type updateFamilyOption struct {
ignoreWarnings bool
}
-// GCPolicyOption is the interface to change GC policy settings
+// GCPolicyOption is deprecated, kept for backwards compatibility, use UpdateFamilyOption in new code
type GCPolicyOption interface {
- apply(s *gcPolicySettings)
+ apply(s *updateFamilyOption)
}
+// UpdateFamilyOption is the interface to update family settings
+type UpdateFamilyOption GCPolicyOption
+
type ignoreWarnings bool
-func (w ignoreWarnings) apply(s *gcPolicySettings) {
+func (w ignoreWarnings) apply(s *updateFamilyOption) {
s.ignoreWarnings = bool(w)
}
-// IgnoreWarnings returns a gcPolicyOption that ignores safety checks when modifying the column families
+// IgnoreWarnings returns an updateFamilyOption that ignores safety checks when modifying the column families
func IgnoreWarnings() GCPolicyOption {
return ignoreWarnings(true)
}
-func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error {
+// SetGCPolicy specifies which cells in a column family should be garbage collected.
+// GC executes opportunistically in the background; table reads may return data
+// matching the GC policy.
+func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
+ return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy})
+}
+
+// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options
+func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error {
+ familyOpts := []UpdateFamilyOption{}
+ for _, opt := range opts {
+ if opt != nil {
+ familyOpts = append(familyOpts, opt.(UpdateFamilyOption))
+ }
+ }
+ return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy}, familyOpts...)
+}
+
+// UpdateFamily updates column families' garbage collection policies and value type.
+func (ac *AdminClient) UpdateFamily(ctx context.Context, table, familyName string, family Family, opts ...UpdateFamilyOption) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
- s := gcPolicySettings{}
+ s := updateFamilyOption{}
for _, opt := range opts {
if opt != nil {
opt.apply(&s)
}
}
+
+ cf := &btapb.ColumnFamily{}
+ mask := &field_mask.FieldMask{}
+ if family.GCPolicy != nil {
+ cf.GcRule = family.GCPolicy.proto()
+ mask.Paths = append(mask.Paths, "gc_rule")
+
+ }
+ if family.ValueType != nil {
+ cf.ValueType = family.ValueType.proto()
+ mask.Paths = append(mask.Paths, "value_type")
+ }
+
+ // No update
+ if len(mask.Paths) == 0 {
+ return nil
+ }
+
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
- Id: family,
- Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
+ Id: familyName,
+ Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: cf},
+ UpdateMask: mask,
}},
IgnoreWarnings: s.ignoreWarnings,
}
@@ -709,18 +750,6 @@ func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, po
return err
}
-// SetGCPolicy specifies which cells in a column family should be garbage collected.
-// GC executes opportunistically in the background; table reads may return data
-// matching the GC policy.
-func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
- return ac.SetGCPolicyWithOptions(ctx, table, family, policy)
-}
-
-// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options
-func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error {
- return ac.setGCPolicy(ctx, table, family, policy, opts...)
-}
-
// DropRowRange permanently deletes a row range from the specified table.
func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
diff --git a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
index 5fec7f86af29e..0fc21d396ded7 100644
--- a/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
+++ b/vendor/cloud.google.com/go/bigtable/admin/apiv2/adminpb/instance.pb.go
@@ -1186,6 +1186,19 @@ type AppProfile_MultiClusterRoutingUseAny struct {
// The set of clusters to route to. The order is ignored; clusters will be
// tried in order of distance. If left empty, all clusters are eligible.
ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"`
+ // Possible algorithms for routing affinity. If enabled, Bigtable will
+ // route between equidistant clusters in a deterministic order rather than
+ // choosing randomly.
+ //
+ // This mechanism gives read-your-writes consistency for *most* requests
+ // under *most* circumstances, without sacrificing availability. Consistency
+ // is *not* guaranteed, as requests might still fail over between clusters
+ // in the event of errors or latency.
+ //
+ // Types that are assignable to Affinity:
+ //
+ // *AppProfile_MultiClusterRoutingUseAny_RowAffinity_
+ Affinity isAppProfile_MultiClusterRoutingUseAny_Affinity `protobuf_oneof:"affinity"`
}
func (x *AppProfile_MultiClusterRoutingUseAny) Reset() {
@@ -1227,6 +1240,33 @@ func (x *AppProfile_MultiClusterRoutingUseAny) GetClusterIds() []string {
return nil
}
+func (m *AppProfile_MultiClusterRoutingUseAny) GetAffinity() isAppProfile_MultiClusterRoutingUseAny_Affinity {
+ if m != nil {
+ return m.Affinity
+ }
+ return nil
+}
+
+func (x *AppProfile_MultiClusterRoutingUseAny) GetRowAffinity() *AppProfile_MultiClusterRoutingUseAny_RowAffinity {
+ if x, ok := x.GetAffinity().(*AppProfile_MultiClusterRoutingUseAny_RowAffinity_); ok {
+ return x.RowAffinity
+ }
+ return nil
+}
+
+type isAppProfile_MultiClusterRoutingUseAny_Affinity interface {
+ isAppProfile_MultiClusterRoutingUseAny_Affinity()
+}
+
+type AppProfile_MultiClusterRoutingUseAny_RowAffinity_ struct {
+ // Row affinity sticky routing based on the row key of the request.
+ // Requests that span multiple rows are routed non-deterministically.
+ RowAffinity *AppProfile_MultiClusterRoutingUseAny_RowAffinity `protobuf:"bytes,3,opt,name=row_affinity,json=rowAffinity,proto3,oneof"`
+}
+
+func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity_) isAppProfile_MultiClusterRoutingUseAny_Affinity() {
+}
+
// Unconditionally routes all read/write requests to a specific cluster.
// This option preserves read-your-writes consistency but does not improve
// availability.
@@ -1399,6 +1439,53 @@ func (x *AppProfile_DataBoostIsolationReadOnly) GetComputeBillingOwner() AppProf
return AppProfile_DataBoostIsolationReadOnly_COMPUTE_BILLING_OWNER_UNSPECIFIED
}
+// If enabled, Bigtable will route the request based on the row key of the
+// request, rather than randomly. Instead, each row key will be assigned
+// to a cluster, and will stick to that cluster. If clusters are added or
+// removed, then this may affect which row keys stick to which clusters.
+// To avoid this, users can use a cluster group to specify which clusters
+// are to be used. In this case, new clusters that are not a part of the
+// cluster group will not be routed to, and routing will be unaffected by
+// the new cluster. Moreover, clusters specified in the cluster group cannot
+// be deleted unless removed from the cluster group.
+type AppProfile_MultiClusterRoutingUseAny_RowAffinity struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) Reset() {
+ *x = AppProfile_MultiClusterRoutingUseAny_RowAffinity{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoMessage() {}
+
+func (x *AppProfile_MultiClusterRoutingUseAny_RowAffinity) ProtoReflect() protoreflect.Message {
+ mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AppProfile_MultiClusterRoutingUseAny_RowAffinity.ProtoReflect.Descriptor instead.
+func (*AppProfile_MultiClusterRoutingUseAny_RowAffinity) Descriptor() ([]byte, []int) {
+ return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{4, 0, 0}
+}
+
var File_google_bigtable_admin_v2_instance_proto protoreflect.FileDescriptor
var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{
@@ -1543,8 +1630,8 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{
0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa8,
- 0x0b, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a,
+ 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb5,
+ 0x0c, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
@@ -1585,107 +1672,116 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{
0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f,
0x6e, 0x6c, 0x79, 0x48, 0x01, 0x52, 0x1a, 0x64, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74,
0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
- 0x79, 0x1a, 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f,
- 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a,
- 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f,
- 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f,
- 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x73, 0x1a, 0x5e, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64,
- 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69,
- 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c,
- 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f,
- 0x72, 0x69, 0x74, 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f,
- 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f,
- 0x6e, 0x6c, 0x79, 0x12, 0x8c, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f,
- 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x53, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41,
- 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f,
- 0x6f, 0x73, 0x74, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64,
- 0x4f, 0x6e, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c,
- 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70,
- 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88,
- 0x01, 0x01, 0x22, 0x4b, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c,
- 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d,
- 0x50, 0x55, 0x54, 0x45, 0x5f, 0x42, 0x49, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e,
- 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
- 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42,
- 0x18, 0x0a, 0x16, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c,
- 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69,
- 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54,
- 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10,
- 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45,
- 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49,
- 0x54, 0x59, 0x5f, 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27,
- 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70,
- 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d,
- 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70,
- 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f,
- 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09,
- 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e,
- 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54,
- 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12,
- 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07,
- 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65,
- 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70,
- 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65,
- 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a,
- 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d,
- 0x42, 0xcb, 0x02, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
+ 0x79, 0x1a, 0xc8, 0x01, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73,
+ 0x12, 0x6f, 0x0a, 0x0c, 0x72, 0x6f, 0x77, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x4d, 0x75, 0x6c,
+ 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69,
+ 0x74, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x79, 0x1a, 0x0d, 0x0a, 0x0b, 0x52, 0x6f, 0x77, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x1a, 0x73, 0x0a, 0x14,
+ 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75,
+ 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x73, 0x1a, 0x5e, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f,
+ 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69,
+ 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50,
+ 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74,
+ 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x1a, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x49,
+ 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79,
+ 0x12, 0x8c, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c,
+ 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x53, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50,
+ 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74,
+ 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
+ 0x79, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
+ 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65,
+ 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x88, 0x01, 0x01, 0x22,
+ 0x4b, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e,
+ 0x67, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, 0x4d, 0x50, 0x55, 0x54,
+ 0x45, 0x5f, 0x42, 0x49, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x50, 0x41, 0x59, 0x53, 0x10, 0x01, 0x42, 0x18, 0x0a, 0x16,
+ 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
+ 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69,
+ 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c,
+ 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13,
+ 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55,
+ 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f,
+ 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f,
+ 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70,
+ 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70,
+ 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x69, 0x73, 0x6f,
+ 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41,
+ 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79,
- 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x1c,
- 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x67, 0x6f, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e,
- 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64,
+ 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b,
+ 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75,
+ 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75,
+ 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41,
+ 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
+ 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xcb, 0x02,
+ 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f,
+ 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69,
+ 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x70, 0x62, 0x3b, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64,
+ 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
+ 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
@@ -1701,56 +1797,58 @@ func file_google_bigtable_admin_v2_instance_proto_rawDescGZIP() []byte {
}
var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
-var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
+var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
var file_google_bigtable_admin_v2_instance_proto_goTypes = []any{
(Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State
(Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type
(Cluster_State)(0), // 2: google.bigtable.admin.v2.Cluster.State
(AppProfile_Priority)(0), // 3: google.bigtable.admin.v2.AppProfile.Priority
(AppProfile_DataBoostIsolationReadOnly_ComputeBillingOwner)(0), // 4: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
- (*Instance)(nil), // 5: google.bigtable.admin.v2.Instance
- (*AutoscalingTargets)(nil), // 6: google.bigtable.admin.v2.AutoscalingTargets
- (*AutoscalingLimits)(nil), // 7: google.bigtable.admin.v2.AutoscalingLimits
- (*Cluster)(nil), // 8: google.bigtable.admin.v2.Cluster
- (*AppProfile)(nil), // 9: google.bigtable.admin.v2.AppProfile
- (*HotTablet)(nil), // 10: google.bigtable.admin.v2.HotTablet
- nil, // 11: google.bigtable.admin.v2.Instance.LabelsEntry
- (*Cluster_ClusterAutoscalingConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
- (*Cluster_ClusterConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterConfig
- (*Cluster_EncryptionConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.EncryptionConfig
- (*AppProfile_MultiClusterRoutingUseAny)(nil), // 15: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
- (*AppProfile_SingleClusterRouting)(nil), // 16: google.bigtable.admin.v2.AppProfile.SingleClusterRouting
- (*AppProfile_StandardIsolation)(nil), // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation
- (*AppProfile_DataBoostIsolationReadOnly)(nil), // 18: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
- (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp
- (StorageType)(0), // 20: google.bigtable.admin.v2.StorageType
+ (*Instance)(nil), // 5: google.bigtable.admin.v2.Instance
+ (*AutoscalingTargets)(nil), // 6: google.bigtable.admin.v2.AutoscalingTargets
+ (*AutoscalingLimits)(nil), // 7: google.bigtable.admin.v2.AutoscalingLimits
+ (*Cluster)(nil), // 8: google.bigtable.admin.v2.Cluster
+ (*AppProfile)(nil), // 9: google.bigtable.admin.v2.AppProfile
+ (*HotTablet)(nil), // 10: google.bigtable.admin.v2.HotTablet
+ nil, // 11: google.bigtable.admin.v2.Instance.LabelsEntry
+ (*Cluster_ClusterAutoscalingConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
+ (*Cluster_ClusterConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.ClusterConfig
+ (*Cluster_EncryptionConfig)(nil), // 14: google.bigtable.admin.v2.Cluster.EncryptionConfig
+ (*AppProfile_MultiClusterRoutingUseAny)(nil), // 15: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
+ (*AppProfile_SingleClusterRouting)(nil), // 16: google.bigtable.admin.v2.AppProfile.SingleClusterRouting
+ (*AppProfile_StandardIsolation)(nil), // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation
+ (*AppProfile_DataBoostIsolationReadOnly)(nil), // 18: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ (*AppProfile_MultiClusterRoutingUseAny_RowAffinity)(nil), // 19: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
+ (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp
+ (StorageType)(0), // 21: google.bigtable.admin.v2.StorageType
}
var file_google_bigtable_admin_v2_instance_proto_depIdxs = []int32{
0, // 0: google.bigtable.admin.v2.Instance.state:type_name -> google.bigtable.admin.v2.Instance.State
1, // 1: google.bigtable.admin.v2.Instance.type:type_name -> google.bigtable.admin.v2.Instance.Type
11, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry
- 19, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp
+ 20, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp
2, // 4: google.bigtable.admin.v2.Cluster.state:type_name -> google.bigtable.admin.v2.Cluster.State
13, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig
- 20, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType
+ 21, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType
14, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig
15, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny
16, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting
3, // 10: google.bigtable.admin.v2.AppProfile.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
17, // 11: google.bigtable.admin.v2.AppProfile.standard_isolation:type_name -> google.bigtable.admin.v2.AppProfile.StandardIsolation
18, // 12: google.bigtable.admin.v2.AppProfile.data_boost_isolation_read_only:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
- 19, // 13: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp
- 19, // 14: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp
+ 20, // 13: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp
+ 20, // 14: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp
7, // 15: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits
6, // 16: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets
12, // 17: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig
- 3, // 18: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
- 4, // 19: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
- 20, // [20:20] is the sub-list for method output_type
- 20, // [20:20] is the sub-list for method input_type
- 20, // [20:20] is the sub-list for extension type_name
- 20, // [20:20] is the sub-list for extension extendee
- 0, // [0:20] is the sub-list for field type_name
+ 19, // 18: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.row_affinity:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny.RowAffinity
+ 3, // 19: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority
+ 4, // 20: google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.compute_billing_owner:type_name -> google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ 21, // [21:21] is the sub-list for method output_type
+ 21, // [21:21] is the sub-list for method input_type
+ 21, // [21:21] is the sub-list for extension type_name
+ 21, // [21:21] is the sub-list for extension extendee
+ 0, // [0:21] is the sub-list for field type_name
}
func init() { file_google_bigtable_admin_v2_instance_proto_init() }
@@ -1916,6 +2014,18 @@ func file_google_bigtable_admin_v2_instance_proto_init() {
return nil
}
}
+ file_google_bigtable_admin_v2_instance_proto_msgTypes[14].Exporter = func(v any, i int) any {
+ switch v := v.(*AppProfile_MultiClusterRoutingUseAny_RowAffinity); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []any{}
file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []any{
@@ -1928,6 +2038,9 @@ func file_google_bigtable_admin_v2_instance_proto_init() {
(*AppProfile_StandardIsolation_)(nil),
(*AppProfile_DataBoostIsolationReadOnly_)(nil),
}
+ file_google_bigtable_admin_v2_instance_proto_msgTypes[10].OneofWrappers = []any{
+ (*AppProfile_MultiClusterRoutingUseAny_RowAffinity_)(nil),
+ }
file_google_bigtable_admin_v2_instance_proto_msgTypes[13].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -1935,7 +2048,7 @@ func file_google_bigtable_admin_v2_instance_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_bigtable_admin_v2_instance_proto_rawDesc,
NumEnums: 5,
- NumMessages: 14,
+ NumMessages: 15,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go
index e12035db4e7a8..b08785658714e 100644
--- a/vendor/cloud.google.com/go/bigtable/bigtable.go
+++ b/vendor/cloud.google.com/go/bigtable/bigtable.go
@@ -18,6 +18,7 @@ package bigtable // import "cloud.google.com/go/bigtable"
import (
"context"
+ "encoding/base64"
"errors"
"fmt"
"io"
@@ -49,6 +50,7 @@ import (
const prodAddr = "bigtable.googleapis.com:443"
const mtlsProdAddr = "bigtable.mtls.googleapis.com:443"
+const featureFlagsHeaderKey = "bigtable-features"
// Client is a client for reading and writing data to tables in an instance.
//
@@ -122,7 +124,7 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
}
// Create a OpenTelemetry metrics configuration
- metricsTracerFactory, err := newBuiltinMetricsTracerFactory(ctx, project, instance, config.AppProfile, metricsProvider)
+ metricsTracerFactory, err := newBuiltinMetricsTracerFactory(ctx, project, instance, config.AppProfile, metricsProvider, opts...)
if err != nil {
return nil, err
}
@@ -266,6 +268,25 @@ type Table struct {
authorizedView string
}
+// newFeatureFlags creates the feature flags `bigtable-features` header
+// to be sent on each request. This includes all features supported and
+// and enabled on the client
+func (c *Client) newFeatureFlags() metadata.MD {
+ ff := btpb.FeatureFlags{
+ ReverseScans: true,
+ LastScannedRowResponses: true,
+ ClientSideMetricsEnabled: c.metricsTracerFactory.enabled,
+ }
+
+ val := ""
+ b, err := proto.Marshal(&ff)
+ if err == nil {
+ val = base64.URLEncoding.EncodeToString(b)
+ }
+
+ return metadata.Pairs(featureFlagsHeaderKey, val)
+}
+
// Open opens a table.
func (c *Client) Open(table string) *Table {
return &Table{
@@ -274,7 +295,7 @@ func (c *Client) Open(table string) *Table {
md: metadata.Join(metadata.Pairs(
resourcePrefixHeader, c.fullTableName(table),
requestParamsHeader, c.requestParamsHeaderValue(table),
- ), btopt.WithFeatureFlags()),
+ ), c.newFeatureFlags()),
}
}
@@ -286,7 +307,7 @@ func (c *Client) OpenTable(table string) TableAPI {
md: metadata.Join(metadata.Pairs(
resourcePrefixHeader, c.fullTableName(table),
requestParamsHeader, c.requestParamsHeaderValue(table),
- ), btopt.WithFeatureFlags()),
+ ), c.newFeatureFlags()),
}}
}
@@ -298,7 +319,7 @@ func (c *Client) OpenAuthorizedView(table, authorizedView string) TableAPI {
md: metadata.Join(metadata.Pairs(
resourcePrefixHeader, c.fullAuthorizedViewName(table, authorizedView),
requestParamsHeader, c.requestParamsHeaderValue(table),
- ), btopt.WithFeatureFlags()),
+ ), c.newFeatureFlags()),
authorizedView: authorizedView,
}}
}
@@ -395,8 +416,10 @@ func (t *Table) readRows(ctx context.Context, arg RowSet, f func(Row) bool, mt *
// Ignore error since header is only being used to record builtin metrics
// Failure to record metrics should not fail the operation
*headerMD, _ = stream.Header()
+ res := new(btpb.ReadRowsResponse)
for {
- res, err := stream.Recv()
+ proto.Reset(res)
+ err := stream.RecvMsg(res)
if err == io.EOF {
*trailerMD = stream.Trailer()
break
@@ -437,7 +460,8 @@ func (t *Table) readRows(ctx context.Context, arg RowSet, f func(Row) bool, mt *
// Cancel and drain stream.
cancel()
for {
- if _, err := stream.Recv(); err != nil {
+ proto.Reset(res)
+ if err := stream.RecvMsg(res); err != nil {
*trailerMD = stream.Trailer()
// The stream has ended. We don't return an error
// because the caller has intentionally interrupted the scan.
@@ -1562,40 +1586,48 @@ func recordOperationCompletion(mt *builtinMetricsTracer) {
// - then, calls gax.Invoke with 'callWrapper' as an argument
func gaxInvokeWithRecorder(ctx context.Context, mt *builtinMetricsTracer, method string,
f func(ctx context.Context, headerMD, trailerMD *metadata.MD, _ gax.CallSettings) error, opts ...gax.CallOption) error {
-
+ attemptHeaderMD := metadata.New(nil)
+ attempTrailerMD := metadata.New(nil)
mt.method = method
- callWrapper := func(ctx context.Context, callSettings gax.CallSettings) error {
- // Increment number of attempts
- mt.currOp.incrementAttemptCount()
- attemptHeaderMD := metadata.New(nil)
- attempTrailerMD := metadata.New(nil)
- mt.currOp.currAttempt = attemptTracer{}
+ var callWrapper func(context.Context, gax.CallSettings) error
+ if !mt.builtInEnabled {
+ callWrapper = func(ctx context.Context, callSettings gax.CallSettings) error {
+ // f makes calls to CBT service
+ return f(ctx, &attemptHeaderMD, &attempTrailerMD, callSettings)
+ }
+ } else {
+ callWrapper = func(ctx context.Context, callSettings gax.CallSettings) error {
+ // Increment number of attempts
+ mt.currOp.incrementAttemptCount()
+
+ mt.currOp.currAttempt = attemptTracer{}
- // record start time
- mt.currOp.currAttempt.setStartTime(time.Now())
+ // record start time
+ mt.currOp.currAttempt.setStartTime(time.Now())
- // f makes calls to CBT service
- err := f(ctx, &attemptHeaderMD, &attempTrailerMD, callSettings)
+ // f makes calls to CBT service
+ err := f(ctx, &attemptHeaderMD, &attempTrailerMD, callSettings)
- // Set attempt status
- statusCode, _ := convertToGrpcStatusErr(err)
- mt.currOp.currAttempt.setStatus(statusCode.String())
+ // Set attempt status
+ statusCode, _ := convertToGrpcStatusErr(err)
+ mt.currOp.currAttempt.setStatus(statusCode.String())
- // Get location attributes from metadata and set it in tracer
- // Ignore get location error since the metric can still be recorded with rest of the attributes
- clusterID, zoneID, _ := extractLocation(attemptHeaderMD, attempTrailerMD)
- mt.currOp.currAttempt.setClusterID(clusterID)
- mt.currOp.currAttempt.setZoneID(zoneID)
+ // Get location attributes from metadata and set it in tracer
+ // Ignore get location error since the metric can still be recorded with rest of the attributes
+ clusterID, zoneID, _ := extractLocation(attemptHeaderMD, attempTrailerMD)
+ mt.currOp.currAttempt.setClusterID(clusterID)
+ mt.currOp.currAttempt.setZoneID(zoneID)
- // Set server latency in tracer
- serverLatency, serverLatencyErr := extractServerLatency(attemptHeaderMD, attempTrailerMD)
- mt.currOp.currAttempt.setServerLatencyErr(serverLatencyErr)
- mt.currOp.currAttempt.setServerLatency(serverLatency)
+ // Set server latency in tracer
+ serverLatency, serverLatencyErr := extractServerLatency(attemptHeaderMD, attempTrailerMD)
+ mt.currOp.currAttempt.setServerLatencyErr(serverLatencyErr)
+ mt.currOp.currAttempt.setServerLatency(serverLatency)
- // Record attempt specific metrics
- recordAttemptCompletion(mt)
- return err
+ // Record attempt specific metrics
+ recordAttemptCompletion(mt)
+ return err
+ }
}
return gax.Invoke(ctx, callWrapper, opts...)
}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
index c53bb2b4dc008..ca0947a4f2f4b 100644
--- a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
@@ -333,10 +333,18 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
return true
})
} else if modify := mod.GetUpdate(); modify != nil {
- if _, ok := tbl.families[mod.Id]; !ok {
+ newcf := newColumnFamily(req.Name+"/columnFamilies/"+mod.Id, 0, modify)
+ cf, ok := tbl.families[mod.Id]
+ if !ok {
return nil, fmt.Errorf("no such family %q", mod.Id)
}
- newcf := newColumnFamily(req.Name+"/columnFamilies/"+mod.Id, 0, modify)
+ if cf.valueType != nil {
+ _, isOldAggregateType := cf.valueType.Kind.(*btapb.Type_AggregateType)
+ if isOldAggregateType && cf.valueType != newcf.valueType {
+ return nil, status.Errorf(codes.InvalidArgument, "Immutable fields 'value_type.aggregate_type' cannot be updated")
+ }
+ }
+
// assume that we ALWAYS want to replace by the new setting
// we may need partial update through
tbl.families[mod.Id] = newcf
diff --git a/vendor/cloud.google.com/go/bigtable/conformance_test.sh b/vendor/cloud.google.com/go/bigtable/conformance_test.sh
index 35a126e2f94d8..bf6f520a6b0c0 100644
--- a/vendor/cloud.google.com/go/bigtable/conformance_test.sh
+++ b/vendor/cloud.google.com/go/bigtable/conformance_test.sh
@@ -50,10 +50,10 @@ trap cleanup EXIT
# Run the conformance tests
cd $conformanceTestsHome
-# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.20.2
-go install golang.org/dl/go1.20.2@latest
-go1.20.2 download
-go1.20.2 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log
+# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.22.5
+go install golang.org/dl/go1.22.5@latest
+go1.22.5 download
+go1.22.5 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log
RETURN_CODE=$?
echo "exiting with ${RETURN_CODE}"
diff --git a/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/vendor/cloud.google.com/go/bigtable/internal/option/option.go
index 5c15e6502fbbb..d6c879266e242 100644
--- a/vendor/cloud.google.com/go/bigtable/internal/option/option.go
+++ b/vendor/cloud.google.com/go/bigtable/internal/option/option.go
@@ -19,13 +19,9 @@ package option
import (
"context"
- "encoding/base64"
"fmt"
"os"
- btpb "cloud.google.com/go/bigtable/apiv2/bigtablepb"
- "google.golang.org/protobuf/proto"
-
"cloud.google.com/go/bigtable/internal"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go/v2"
@@ -66,25 +62,6 @@ func withGoogleClientInfo() metadata.MD {
return metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
-func makeFeatureFlags() string {
- ff := btpb.FeatureFlags{ReverseScans: true, LastScannedRowResponses: true}
- b, err := proto.Marshal(&ff)
- if err != nil {
- return ""
- }
-
- return base64.URLEncoding.EncodeToString(b)
-}
-
-var featureFlags = makeFeatureFlags()
-
-// WithFeatureFlags set the feature flags the client supports in the
-// `bigtable-features` header sent on each request. Intended for
-// use by Google-written clients.
-func WithFeatureFlags() metadata.MD {
- return metadata.Pairs("bigtable-features", featureFlags)
-}
-
// streamInterceptor intercepts the creation of ClientStream within the bigtable
// client to inject Google client information into the context metadata for
// streaming RPCs.
diff --git a/vendor/cloud.google.com/go/bigtable/internal/version.go b/vendor/cloud.google.com/go/bigtable/internal/version.go
index a08cb7cabc62d..ba70a43673b7c 100644
--- a/vendor/cloud.google.com/go/bigtable/internal/version.go
+++ b/vendor/cloud.google.com/go/bigtable/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.29.0"
+const Version = "1.33.0"
diff --git a/vendor/cloud.google.com/go/bigtable/metrics.go b/vendor/cloud.google.com/go/bigtable/metrics.go
index 9d9069d328607..c76ecfa1e0ddf 100644
--- a/vendor/cloud.google.com/go/bigtable/metrics.go
+++ b/vendor/cloud.google.com/go/bigtable/metrics.go
@@ -20,7 +20,6 @@ import (
"context"
"errors"
"fmt"
- "log"
"os"
"time"
@@ -66,11 +65,13 @@ const (
metricUnitCount = "1"
)
-// These are effectively const, but for testing purposes they are mutable
+// These are effectively constant, but for testing purposes they are mutable
var (
// duration between two metric exports
defaultSamplePeriod = 5 * time.Minute
+ metricsErrorPrefix = "bigtable-metrics: "
+
clientName = fmt.Sprintf("go-bigtable/%v", internal.Version)
bucketBounds = []float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0,
@@ -120,7 +121,12 @@ var (
return "go-" + uuid.NewString() + "@" + hostname, nil
}
- exporterOpts = []option.ClientOption{}
+ // GCM exporter should use the same options as Bigtable client
+ // createExporterOptions takes Bigtable client options and returns exporter options
+ // Overwritten in tests
+ createExporterOptions = func(btOpts ...option.ClientOption) []option.ClientOption {
+ return btOpts
+ }
)
type metricInfo struct {
@@ -144,10 +150,10 @@ type builtinMetricsTracerFactory struct {
retryCount metric.Int64Counter
}
-func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appProfile string, metricsProvider MetricsProvider) (*builtinMetricsTracerFactory, error) {
+func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appProfile string, metricsProvider MetricsProvider, opts ...option.ClientOption) (*builtinMetricsTracerFactory, error) {
clientUID, err := generateClientUID()
if err != nil {
- log.Printf("built-in metrics: generateClientUID failed: %v. Using empty string in the %v metric atteribute", err, metricLabelKeyClientUID)
+ return nil, err
}
tracerFactory := &builtinMetricsTracerFactory{
@@ -165,7 +171,7 @@ func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appP
var meterProvider *sdkmetric.MeterProvider
if metricsProvider == nil {
// Create default meter provider
- mpOptions, err := builtInMeterProviderOptions(project)
+ mpOptions, err := builtInMeterProviderOptions(project, opts...)
if err != nil {
return tracerFactory, err
}
@@ -190,8 +196,9 @@ func newBuiltinMetricsTracerFactory(ctx context.Context, project, instance, appP
return tracerFactory, err
}
-func builtInMeterProviderOptions(project string) ([]sdkmetric.Option, error) {
- defaultExporter, err := newMonitoringExporter(context.Background(), project, exporterOpts...)
+func builtInMeterProviderOptions(project string, opts ...option.ClientOption) ([]sdkmetric.Option, error) {
+ allOpts := createExporterOptions(opts...)
+ defaultExporter, err := newMonitoringExporter(context.Background(), project, allOpts...)
if err != nil {
return nil, err
}
diff --git a/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go b/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go
index 29bc957b3b497..98d63743d0527 100644
--- a/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go
+++ b/vendor/cloud.google.com/go/bigtable/metrics_monitoring_exporter.go
@@ -89,28 +89,37 @@ func newMonitoringExporter(ctx context.Context, project string, opts ...option.C
}, nil
}
+func wrapMetricsError(err error) error {
+ if err == nil {
+ return err
+ }
+ return fmt.Errorf("%v%w", metricsErrorPrefix, err)
+}
+
// ForceFlush does nothing, the exporter holds no state.
-func (e *monitoringExporter) ForceFlush(ctx context.Context) error { return ctx.Err() }
+func (me *monitoringExporter) ForceFlush(ctx context.Context) error {
+ return wrapMetricsError(ctx.Err())
+}
// Shutdown shuts down the client connections.
-func (e *monitoringExporter) Shutdown(ctx context.Context) error {
+func (me *monitoringExporter) Shutdown(ctx context.Context) error {
err := errShutdown
- e.shutdownOnce.Do(func() {
- close(e.shutdown)
- err = errors.Join(ctx.Err(), e.client.Close())
+ me.shutdownOnce.Do(func() {
+ close(me.shutdown)
+ err = errors.Join(ctx.Err(), me.client.Close())
})
- return err
+ return wrapMetricsError(err)
}
// Export exports OpenTelemetry Metrics to Google Cloud Monitoring.
func (me *monitoringExporter) Export(ctx context.Context, rm *otelmetricdata.ResourceMetrics) error {
select {
case <-me.shutdown:
- return errShutdown
+ return wrapMetricsError(errShutdown)
default:
}
- return me.exportTimeSeries(ctx, rm)
+ return wrapMetricsError(me.exportTimeSeries(ctx, rm))
}
// Temporality returns the Temporality to use for an instrument kind.
diff --git a/vendor/cloud.google.com/go/bigtable/metric_util.go b/vendor/cloud.google.com/go/bigtable/metrics_util.go
similarity index 97%
rename from vendor/cloud.google.com/go/bigtable/metric_util.go
rename to vendor/cloud.google.com/go/bigtable/metrics_util.go
index a4631f8b5b742..8783f6ff4b214 100644
--- a/vendor/cloud.google.com/go/bigtable/metric_util.go
+++ b/vendor/cloud.google.com/go/bigtable/metrics_util.go
@@ -17,7 +17,7 @@ limitations under the License.
package bigtable
import (
- "fmt"
+ "errors"
"strconv"
"strings"
"time"
@@ -81,7 +81,7 @@ func extractLocation(headerMD metadata.MD, trailerMD metadata.MD) (string, strin
}
if len(locationMetadata) < 1 {
- return defaultCluster, defaultZone, fmt.Errorf("failed to get location metadata")
+ return defaultCluster, defaultZone, errors.New("failed to get location metadata")
}
// Unmarshal binary location metadata
diff --git a/vendor/cloud.google.com/go/bigtable/type.go b/vendor/cloud.google.com/go/bigtable/type.go
index 59f954f081f7b..88dd992196902 100644
--- a/vendor/cloud.google.com/go/bigtable/type.go
+++ b/vendor/cloud.google.com/go/bigtable/type.go
@@ -16,7 +16,11 @@ limitations under the License.
package bigtable
-import btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb"
+import (
+ btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
// Type wraps the protobuf representation of a type. See the protobuf definition
// for more details on types.
@@ -24,6 +28,41 @@ type Type interface {
proto() *btapb.Type
}
+var marshalOptions = protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+var unmarshalOptions = protojson.UnmarshalOptions{AllowPartial: true}
+
+// MarshalJSON returns the string representation of the Type protobuf.
+func MarshalJSON(t Type) ([]byte, error) {
+ return marshalOptions.Marshal(t.proto())
+}
+
+// UnmarshalJSON returns a Type object from json bytes.
+func UnmarshalJSON(data []byte) (Type, error) {
+ result := &btapb.Type{}
+ if err := unmarshalOptions.Unmarshal(data, result); err != nil {
+ return nil, err
+ }
+ return ProtoToType(result), nil
+}
+
+// Equal compares Type objects.
+func Equal(a, b Type) bool {
+ if a == nil && b == nil {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return proto.Equal(a.proto(), b.proto())
+}
+
+// TypeUnspecified represents the absence of a type.
+type TypeUnspecified struct{}
+
+func (n TypeUnspecified) proto() *btapb.Type {
+ return &btapb.Type{}
+}
+
type unknown[T interface{}] struct {
wrapped *T
}
@@ -67,9 +106,9 @@ type StringEncoding interface {
proto() *btapb.Type_String_Encoding
}
-// StringUtf8Encoding represents a string with UTF-8 encoding.
-type StringUtf8Encoding struct {
-}
+// StringUtf8Encoding represents an UTF-8 raw encoding for a string.
+// DEPRECATED: Please use StringUtf8BytesEncoding.
+type StringUtf8Encoding struct{}
func (encoding StringUtf8Encoding) proto() *btapb.Type_String_Encoding {
return &btapb.Type_String_Encoding{
@@ -77,6 +116,15 @@ func (encoding StringUtf8Encoding) proto() *btapb.Type_String_Encoding {
}
}
+// StringUtf8BytesEncoding represents an UTF-8 bytes encoding for a string.
+type StringUtf8BytesEncoding struct{}
+
+func (encoding StringUtf8BytesEncoding) proto() *btapb.Type_String_Encoding {
+ return &btapb.Type_String_Encoding{
+ Encoding: &btapb.Type_String_Encoding_Utf8Bytes_{},
+ }
+}
+
// StringType represents a string
type StringType struct {
Encoding StringEncoding
@@ -199,12 +247,16 @@ func ProtoToType(pb *btapb.Type) Type {
if pb == nil {
return unknown[btapb.Type]{wrapped: nil}
}
-
+ if pb.Kind == nil {
+ return TypeUnspecified{}
+ }
switch t := pb.Kind.(type) {
case *btapb.Type_Int64Type:
return int64ProtoToType(t.Int64Type)
case *btapb.Type_BytesType:
return bytesProtoToType(t.BytesType)
+ case *btapb.Type_StringType:
+ return stringProtoToType(t.StringType)
case *btapb.Type_AggregateType:
return aggregateProtoToType(t.AggregateType)
default:
@@ -229,6 +281,23 @@ func bytesProtoToType(b *btapb.Type_Bytes) BytesType {
return BytesType{Encoding: bytesEncodingProtoToType(b.Encoding)}
}
+func stringEncodingProtoToType(se *btapb.Type_String_Encoding) StringEncoding {
+ if se == nil {
+ return unknown[btapb.Type_String_Encoding]{wrapped: se}
+ }
+
+ switch se.Encoding.(type) {
+ case *btapb.Type_String_Encoding_Utf8Raw_:
+ return StringUtf8Encoding{}
+ default:
+ return unknown[btapb.Type_String_Encoding]{wrapped: se}
+ }
+}
+
+func stringProtoToType(s *btapb.Type_String) Type {
+ return StringType{Encoding: stringEncodingProtoToType(s.Encoding)}
+}
+
func int64EncodingProtoToEncoding(ie *btapb.Type_Int64_Encoding) Int64Encoding {
if ie == nil {
return unknown[btapb.Type_Int64_Encoding]{wrapped: ie}
@@ -246,7 +315,7 @@ func int64ProtoToType(i *btapb.Type_Int64) Type {
return Int64Type{Encoding: int64EncodingProtoToEncoding(i.Encoding)}
}
-func aggregateProtoToType(agg *btapb.Type_Aggregate) Type {
+func aggregateProtoToType(agg *btapb.Type_Aggregate) AggregateType {
if agg == nil {
return AggregateType{Input: nil, Aggregator: unknownAggregator{wrapped: agg}}
}
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 9594e1e2793c6..da7db19b1c6d1 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,19 @@
# Changes
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd))
+
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 345080b729790..c160b4786bbdf 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
code = res.StatusCode
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
if err := sleep(ctx, delay); err != nil {
return "", "", err
}
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
index bb412f8917e31..2e53f01230090 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -17,10 +17,15 @@
package metadata
-import "syscall"
+import (
+ "errors"
+ "syscall"
+)
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+ syscallRetryable = func(err error) bool {
+ return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
+ }
}
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 133ff68553f70..8644f614c864c 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -79,12 +79,15 @@ are also provided in all auto-generated libraries: for example,
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
ctx := context.Background()
- // https://pkg.go.dev/golang.org/x/oauth2/google
- creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
+ // https://pkg.go.dev/cloud.google.com/go/auth/credentials
+ creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+ Scopes: secretmanager.DefaultAuthScopes(),
+ CredentialsJSON: []byte("JSON creds")
+ }), secretmanager.DefaultAuthScopes()...)
if err != nil {
// TODO: handle error.
}
- client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
+ client, err := secretmanager.NewClient(ctx, option.WithAuthCredentials(creds))
if err != nil {
// TODO: handle error.
}
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index 89f757b1161c3..498a15a5fcd52 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,13 @@
# Changes
+## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+
## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20)
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 54acae7cdc2ed..6b58b6a6f3b55 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -179,6 +179,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/apihub/apiv1": {
+ "api_shortname": "apihub",
+ "distribution_name": "cloud.google.com/go/apihub/apiv1",
+ "description": "API hub API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apihub/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/apikeys/apiv2": {
"api_shortname": "apikeys",
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
@@ -559,6 +569,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/bigquery/storage/apiv1alpha": {
+ "api_shortname": "bigquerystorage",
+ "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha",
+ "description": "BigQuery Storage API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/bigquery/storage/apiv1beta1": {
"api_shortname": "bigquerystorage",
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1",
@@ -1009,6 +1029,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/datastore/apiv1": {
+ "api_shortname": "datastore",
+ "distribution_name": "cloud.google.com/go/datastore/apiv1",
+ "description": "Cloud Datastore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/datastream/apiv1": {
"api_shortname": "datastream",
"distribution_name": "cloud.google.com/go/datastream/apiv1",
@@ -1329,6 +1359,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/gkeconnect/gateway/apiv1": {
+ "api_shortname": "connectgateway",
+ "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1",
+ "description": "Connect Gateway API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/gkeconnect/gateway/apiv1beta1": {
"api_shortname": "connectgateway",
"distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1",
@@ -1352,7 +1392,7 @@
"cloud.google.com/go/gkemulticloud/apiv1": {
"api_shortname": "gkemulticloud",
"distribution_name": "cloud.google.com/go/gkemulticloud/apiv1",
- "description": "Anthos Multi-Cloud API",
+ "description": "GKE Multi-Cloud API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1",
@@ -1552,7 +1592,7 @@
"cloud.google.com/go/managedkafka/apiv1": {
"api_shortname": "managedkafka",
"distribution_name": "cloud.google.com/go/managedkafka/apiv1",
- "description": "Apache Kafka for BigQuery API",
+ "description": "Managed Service for Apache Kafka API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1",
@@ -1569,6 +1609,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/maps/areainsights/apiv1": {
+ "api_shortname": "areainsights",
+ "distribution_name": "cloud.google.com/go/maps/areainsights/apiv1",
+ "description": "Places Insights API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/areainsights/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/maps/fleetengine/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1",
diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md
index 3d239e249dbc1..d120456cd3b34 100644
--- a/vendor/cloud.google.com/go/longrunning/CHANGES.md
+++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.0...longrunning/v0.6.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+
## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.12...longrunning/v0.6.0) (2024-08-20)
diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go
index b1672963f6c50..670f0797ee667 100644
--- a/vendor/cloud.google.com/go/monitoring/internal/version.go
+++ b/vendor/cloud.google.com/go/monitoring/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.21.0"
+const Version = "1.21.1"
diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md
index 64aeb66a13021..5702b6c435a6c 100644
--- a/vendor/cloud.google.com/go/pubsub/CHANGES.md
+++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md
@@ -1,5 +1,42 @@
# Changes
+## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.44.0...pubsub/v1.45.0) (2024-10-22)
+
+
+### Features
+
+* **pubsub:** Add IngestionFailureEvent to the external proto ([f0b05e2](https://github.com/googleapis/google-cloud-go/commit/f0b05e260435d5e8889b9a0ca0ab215fcde169ab))
+* **pubsub:** Add support for ingestion platform logging settings ([#10969](https://github.com/googleapis/google-cloud-go/issues/10969)) ([c60241f](https://github.com/googleapis/google-cloud-go/commit/c60241f46db2b021d799f621851a352f2baec96e))
+
+## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.43.0...pubsub/v1.44.0) (2024-10-08)
+
+
+### Features
+
+* **pubsub:** Add ingestion Cloud Storage fields and Platform Logging fields to Topic ([7250d71](https://github.com/googleapis/google-cloud-go/commit/7250d714a638dcd5df3fbe0e91c5f1250c3f80f9))
+* **pubsub:** Add support for cloud storage ingestion topics ([#10959](https://github.com/googleapis/google-cloud-go/issues/10959)) ([1a11675](https://github.com/googleapis/google-cloud-go/commit/1a116759ce0d25fdcb5776bf73c52408ae1ec985))
+* **pubsub:** Return listing information for subscriptions created via Analytics Hub ([fdb4ea9](https://github.com/googleapis/google-cloud-go/commit/fdb4ea99189657880e5f0e0dce16bef1c3aa0d2f))
+
+
+### Documentation
+
+* **pubsub:** Update documentation for 31 day subscription message retention ([#10845](https://github.com/googleapis/google-cloud-go/issues/10845)) ([9b4b2fa](https://github.com/googleapis/google-cloud-go/commit/9b4b2fa87864906aeae3a8fda460466f951bc6c9))
+
+## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.42.0...pubsub/v1.43.0) (2024-09-09)
+
+
+### Features
+
+* **pubsub:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+* **pubsub:** Allow trace extraction from protobuf message ([#10827](https://github.com/googleapis/google-cloud-go/issues/10827)) ([caa826c](https://github.com/googleapis/google-cloud-go/commit/caa826cea826473ebf4c806b57b0c3b0a2f0f365))
+
+
+### Bug Fixes
+
+* **pubsub:** Add attributes before startSpan ([#10800](https://github.com/googleapis/google-cloud-go/issues/10800)) ([48addbf](https://github.com/googleapis/google-cloud-go/commit/48addbff725ee2bb226ce0ab926415c27fd4ffad))
+* **pubsub:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **pubsub:** Close grpc streams on retry ([#10624](https://github.com/googleapis/google-cloud-go/issues/10624)) ([79a0e11](https://github.com/googleapis/google-cloud-go/commit/79a0e118c88190cbe1b56250a75b67bd98b0d7f2))
+
## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.41.0...pubsub/v1.42.0) (2024-08-19)
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go
new file mode 100644
index 0000000000000..c7a04ffb92e96
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go
@@ -0,0 +1,56 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package pubsub
+
+import (
+ "iter"
+
+ pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *SchemaIterator) All() iter.Seq2[*pubsubpb.Schema, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *SnapshotIterator) All() iter.Seq2[*pubsubpb.Snapshot, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *StringIterator) All() iter.Seq2[string, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *SubscriptionIterator) All() iter.Seq2[*pubsubpb.Subscription, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *TopicIterator) All() iter.Seq2[*pubsubpb.Topic, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
index 03ac865cf48f3..cae0b96f505c7 100644
--- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
@@ -68,6 +68,7 @@ func defaultPublisherGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -587,6 +588,7 @@ func defaultPublisherRESTClientOptions() []option.ClientOption {
internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
}
}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go
index b266b23b471cf..f18defab41d3d 100644
--- a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go
@@ -119,6 +119,145 @@ func (IngestionDataSourceSettings_AwsKinesis_State) EnumDescriptor() ([]byte, []
return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0, 0}
}
+// Possible states for ingestion from Cloud Storage.
+type IngestionDataSourceSettings_CloudStorage_State int32
+
+const (
+ // Default value. This value is unused.
+ IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED IngestionDataSourceSettings_CloudStorage_State = 0
+ // Ingestion is active.
+ IngestionDataSourceSettings_CloudStorage_ACTIVE IngestionDataSourceSettings_CloudStorage_State = 1
+ // Permission denied encountered while calling the Cloud Storage API. This
+ // can happen if the Pub/Sub SA has not been granted the
+ // [appropriate
+ // permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions):
+ // - storage.objects.list: to list the objects in a bucket.
+ // - storage.objects.get: to read the objects in a bucket.
+ // - storage.buckets.get: to verify the bucket exists.
+ IngestionDataSourceSettings_CloudStorage_CLOUD_STORAGE_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 2
+ // Permission denied encountered while publishing to the topic. This can
+ // happen if the Pub/Sub SA has not been granted the [appropriate publish
+ // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher)
+ IngestionDataSourceSettings_CloudStorage_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 3
+ // The provided Cloud Storage bucket doesn't exist.
+ IngestionDataSourceSettings_CloudStorage_BUCKET_NOT_FOUND IngestionDataSourceSettings_CloudStorage_State = 4
+ // The Cloud Storage bucket has too many objects, ingestion will be
+ // paused.
+ IngestionDataSourceSettings_CloudStorage_TOO_MANY_OBJECTS IngestionDataSourceSettings_CloudStorage_State = 5
+)
+
+// Enum value maps for IngestionDataSourceSettings_CloudStorage_State.
+var (
+ IngestionDataSourceSettings_CloudStorage_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "ACTIVE",
+ 2: "CLOUD_STORAGE_PERMISSION_DENIED",
+ 3: "PUBLISH_PERMISSION_DENIED",
+ 4: "BUCKET_NOT_FOUND",
+ 5: "TOO_MANY_OBJECTS",
+ }
+ IngestionDataSourceSettings_CloudStorage_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "ACTIVE": 1,
+ "CLOUD_STORAGE_PERMISSION_DENIED": 2,
+ "PUBLISH_PERMISSION_DENIED": 3,
+ "BUCKET_NOT_FOUND": 4,
+ "TOO_MANY_OBJECTS": 5,
+ }
+)
+
+func (x IngestionDataSourceSettings_CloudStorage_State) Enum() *IngestionDataSourceSettings_CloudStorage_State {
+ p := new(IngestionDataSourceSettings_CloudStorage_State)
+ *p = x
+ return p
+}
+
+func (x IngestionDataSourceSettings_CloudStorage_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (IngestionDataSourceSettings_CloudStorage_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor()
+}
+
+func (IngestionDataSourceSettings_CloudStorage_State) Type() protoreflect.EnumType {
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[1]
+}
+
+func (x IngestionDataSourceSettings_CloudStorage_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use IngestionDataSourceSettings_CloudStorage_State.Descriptor instead.
+func (IngestionDataSourceSettings_CloudStorage_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+// Severity levels of Platform Logs.
+type PlatformLogsSettings_Severity int32
+
+const (
+ // Default value. Logs level is unspecified. Logs will be disabled.
+ PlatformLogsSettings_SEVERITY_UNSPECIFIED PlatformLogsSettings_Severity = 0
+ // Logs will be disabled.
+ PlatformLogsSettings_DISABLED PlatformLogsSettings_Severity = 1
+ // Debug logs and higher-severity logs will be written.
+ PlatformLogsSettings_DEBUG PlatformLogsSettings_Severity = 2
+ // Info logs and higher-severity logs will be written.
+ PlatformLogsSettings_INFO PlatformLogsSettings_Severity = 3
+ // Warning logs and higher-severity logs will be written.
+ PlatformLogsSettings_WARNING PlatformLogsSettings_Severity = 4
+ // Only error logs will be written.
+ PlatformLogsSettings_ERROR PlatformLogsSettings_Severity = 5
+)
+
+// Enum value maps for PlatformLogsSettings_Severity.
+var (
+ PlatformLogsSettings_Severity_name = map[int32]string{
+ 0: "SEVERITY_UNSPECIFIED",
+ 1: "DISABLED",
+ 2: "DEBUG",
+ 3: "INFO",
+ 4: "WARNING",
+ 5: "ERROR",
+ }
+ PlatformLogsSettings_Severity_value = map[string]int32{
+ "SEVERITY_UNSPECIFIED": 0,
+ "DISABLED": 1,
+ "DEBUG": 2,
+ "INFO": 3,
+ "WARNING": 4,
+ "ERROR": 5,
+ }
+)
+
+func (x PlatformLogsSettings_Severity) Enum() *PlatformLogsSettings_Severity {
+ p := new(PlatformLogsSettings_Severity)
+ *p = x
+ return p
+}
+
+func (x PlatformLogsSettings_Severity) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PlatformLogsSettings_Severity) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor()
+}
+
+func (PlatformLogsSettings_Severity) Type() protoreflect.EnumType {
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[2]
+}
+
+func (x PlatformLogsSettings_Severity) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PlatformLogsSettings_Severity.Descriptor instead.
+func (PlatformLogsSettings_Severity) EnumDescriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0}
+}
+
// The state of the topic.
type Topic_State int32
@@ -158,11 +297,11 @@ func (x Topic_State) String() string {
}
func (Topic_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor()
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor()
}
func (Topic_State) Type() protoreflect.EnumType {
- return &file_google_pubsub_v1_pubsub_proto_enumTypes[1]
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[3]
}
func (x Topic_State) Number() protoreflect.EnumNumber {
@@ -171,7 +310,7 @@ func (x Topic_State) Number() protoreflect.EnumNumber {
// Deprecated: Use Topic_State.Descriptor instead.
func (Topic_State) EnumDescriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5, 0}
}
// Possible states for a subscription.
@@ -213,11 +352,11 @@ func (x Subscription_State) String() string {
}
func (Subscription_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor()
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor()
}
func (Subscription_State) Type() protoreflect.EnumType {
- return &file_google_pubsub_v1_pubsub_proto_enumTypes[2]
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[4]
}
func (x Subscription_State) Number() protoreflect.EnumNumber {
@@ -226,7 +365,7 @@ func (x Subscription_State) Number() protoreflect.EnumNumber {
// Deprecated: Use Subscription_State.Descriptor instead.
func (Subscription_State) EnumDescriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20, 0}
}
// Possible states for a BigQuery subscription.
@@ -284,11 +423,11 @@ func (x BigQueryConfig_State) String() string {
}
func (BigQueryConfig_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor()
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[5].Descriptor()
}
func (BigQueryConfig_State) Type() protoreflect.EnumType {
- return &file_google_pubsub_v1_pubsub_proto_enumTypes[3]
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[5]
}
func (x BigQueryConfig_State) Number() protoreflect.EnumNumber {
@@ -297,7 +436,7 @@ func (x BigQueryConfig_State) Number() protoreflect.EnumNumber {
// Deprecated: Use BigQueryConfig_State.Descriptor instead.
func (BigQueryConfig_State) EnumDescriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25, 0}
}
// Possible states for a Cloud Storage subscription.
@@ -352,11 +491,11 @@ func (x CloudStorageConfig_State) String() string {
}
func (CloudStorageConfig_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor()
+ return file_google_pubsub_v1_pubsub_proto_enumTypes[6].Descriptor()
}
func (CloudStorageConfig_State) Type() protoreflect.EnumType {
- return &file_google_pubsub_v1_pubsub_proto_enumTypes[4]
+ return &file_google_pubsub_v1_pubsub_proto_enumTypes[6]
}
func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber {
@@ -365,7 +504,7 @@ func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber {
// Deprecated: Use CloudStorageConfig_State.Descriptor instead.
func (CloudStorageConfig_State) EnumDescriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 0}
}
// A policy constraining the storage of messages published to the topic.
@@ -529,7 +668,11 @@ type IngestionDataSourceSettings struct {
// Types that are assignable to Source:
//
// *IngestionDataSourceSettings_AwsKinesis_
+ // *IngestionDataSourceSettings_CloudStorage_
Source isIngestionDataSourceSettings_Source `protobuf_oneof:"source"`
+ // Optional. Platform Logs settings. If unset, no Platform Logs will be
+ // generated.
+ PlatformLogsSettings *PlatformLogsSettings `protobuf:"bytes,4,opt,name=platform_logs_settings,json=platformLogsSettings,proto3" json:"platform_logs_settings,omitempty"`
}
func (x *IngestionDataSourceSettings) Reset() {
@@ -578,6 +721,20 @@ func (x *IngestionDataSourceSettings) GetAwsKinesis() *IngestionDataSourceSettin
return nil
}
+func (x *IngestionDataSourceSettings) GetCloudStorage() *IngestionDataSourceSettings_CloudStorage {
+ if x, ok := x.GetSource().(*IngestionDataSourceSettings_CloudStorage_); ok {
+ return x.CloudStorage
+ }
+ return nil
+}
+
+func (x *IngestionDataSourceSettings) GetPlatformLogsSettings() *PlatformLogsSettings {
+ if x != nil {
+ return x.PlatformLogsSettings
+ }
+ return nil
+}
+
type isIngestionDataSourceSettings_Source interface {
isIngestionDataSourceSettings_Source()
}
@@ -587,8 +744,153 @@ type IngestionDataSourceSettings_AwsKinesis_ struct {
AwsKinesis *IngestionDataSourceSettings_AwsKinesis `protobuf:"bytes,1,opt,name=aws_kinesis,json=awsKinesis,proto3,oneof"`
}
+type IngestionDataSourceSettings_CloudStorage_ struct {
+ // Optional. Cloud Storage.
+ CloudStorage *IngestionDataSourceSettings_CloudStorage `protobuf:"bytes,2,opt,name=cloud_storage,json=cloudStorage,proto3,oneof"`
+}
+
func (*IngestionDataSourceSettings_AwsKinesis_) isIngestionDataSourceSettings_Source() {}
+func (*IngestionDataSourceSettings_CloudStorage_) isIngestionDataSourceSettings_Source() {}
+
+// Settings for Platform Logs produced by Pub/Sub.
+type PlatformLogsSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. The minimum severity level of Platform Logs that will be written.
+ Severity PlatformLogsSettings_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=google.pubsub.v1.PlatformLogsSettings_Severity" json:"severity,omitempty"`
+}
+
+func (x *PlatformLogsSettings) Reset() {
+ *x = PlatformLogsSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PlatformLogsSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PlatformLogsSettings) ProtoMessage() {}
+
+func (x *PlatformLogsSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PlatformLogsSettings.ProtoReflect.Descriptor instead.
+func (*PlatformLogsSettings) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *PlatformLogsSettings) GetSeverity() PlatformLogsSettings_Severity {
+ if x != nil {
+ return x.Severity
+ }
+ return PlatformLogsSettings_SEVERITY_UNSPECIFIED
+}
+
+// Payload of the Platform Log entry sent when a failure is encountered while
+// ingesting.
+type IngestionFailureEvent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the import topic. Format is:
+ // projects/{project_name}/topics/{topic_name}.
+ Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
+ // Required. Error details explaining why ingestion to Pub/Sub has failed.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+ // Types that are assignable to Failure:
+ //
+ // *IngestionFailureEvent_CloudStorageFailure_
+ Failure isIngestionFailureEvent_Failure `protobuf_oneof:"failure"`
+}
+
+func (x *IngestionFailureEvent) Reset() {
+ *x = IngestionFailureEvent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionFailureEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent) ProtoMessage() {}
+
+func (x *IngestionFailureEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *IngestionFailureEvent) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+func (x *IngestionFailureEvent) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+func (m *IngestionFailureEvent) GetFailure() isIngestionFailureEvent_Failure {
+ if m != nil {
+ return m.Failure
+ }
+ return nil
+}
+
+func (x *IngestionFailureEvent) GetCloudStorageFailure() *IngestionFailureEvent_CloudStorageFailure {
+ if x, ok := x.GetFailure().(*IngestionFailureEvent_CloudStorageFailure_); ok {
+ return x.CloudStorageFailure
+ }
+ return nil
+}
+
+type isIngestionFailureEvent_Failure interface {
+ isIngestionFailureEvent_Failure()
+}
+
+type IngestionFailureEvent_CloudStorageFailure_ struct {
+ // Optional. Failure when ingesting from Cloud Storage.
+ CloudStorageFailure *IngestionFailureEvent_CloudStorageFailure `protobuf:"bytes,3,opt,name=cloud_storage_failure,json=cloudStorageFailure,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_) isIngestionFailureEvent_Failure() {}
+
// A topic resource.
type Topic struct {
state protoimpl.MessageState
@@ -638,7 +940,7 @@ type Topic struct {
func (x *Topic) Reset() {
*x = Topic{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -651,7 +953,7 @@ func (x *Topic) String() string {
func (*Topic) ProtoMessage() {}
func (x *Topic) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -664,7 +966,7 @@ func (x *Topic) ProtoReflect() protoreflect.Message {
// Deprecated: Use Topic.ProtoReflect.Descriptor instead.
func (*Topic) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5}
}
func (x *Topic) GetName() string {
@@ -773,7 +1075,7 @@ type PubsubMessage struct {
func (x *PubsubMessage) Reset() {
*x = PubsubMessage{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -786,7 +1088,7 @@ func (x *PubsubMessage) String() string {
func (*PubsubMessage) ProtoMessage() {}
func (x *PubsubMessage) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -799,7 +1101,7 @@ func (x *PubsubMessage) ProtoReflect() protoreflect.Message {
// Deprecated: Use PubsubMessage.ProtoReflect.Descriptor instead.
func (*PubsubMessage) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6}
}
func (x *PubsubMessage) GetData() []byte {
@@ -851,7 +1153,7 @@ type GetTopicRequest struct {
func (x *GetTopicRequest) Reset() {
*x = GetTopicRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -864,7 +1166,7 @@ func (x *GetTopicRequest) String() string {
func (*GetTopicRequest) ProtoMessage() {}
func (x *GetTopicRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -877,7 +1179,7 @@ func (x *GetTopicRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTopicRequest.ProtoReflect.Descriptor instead.
func (*GetTopicRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7}
}
func (x *GetTopicRequest) GetTopic() string {
@@ -906,7 +1208,7 @@ type UpdateTopicRequest struct {
func (x *UpdateTopicRequest) Reset() {
*x = UpdateTopicRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -919,7 +1221,7 @@ func (x *UpdateTopicRequest) String() string {
func (*UpdateTopicRequest) ProtoMessage() {}
func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -932,7 +1234,7 @@ func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateTopicRequest.ProtoReflect.Descriptor instead.
func (*UpdateTopicRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8}
}
func (x *UpdateTopicRequest) GetTopic() *Topic {
@@ -965,7 +1267,7 @@ type PublishRequest struct {
func (x *PublishRequest) Reset() {
*x = PublishRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -978,7 +1280,7 @@ func (x *PublishRequest) String() string {
func (*PublishRequest) ProtoMessage() {}
func (x *PublishRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -991,7 +1293,7 @@ func (x *PublishRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead.
func (*PublishRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9}
}
func (x *PublishRequest) GetTopic() string {
@@ -1023,7 +1325,7 @@ type PublishResponse struct {
func (x *PublishResponse) Reset() {
*x = PublishResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1036,7 +1338,7 @@ func (x *PublishResponse) String() string {
func (*PublishResponse) ProtoMessage() {}
func (x *PublishResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1049,7 +1351,7 @@ func (x *PublishResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead.
func (*PublishResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10}
}
func (x *PublishResponse) GetMessageIds() []string {
@@ -1079,7 +1381,7 @@ type ListTopicsRequest struct {
func (x *ListTopicsRequest) Reset() {
*x = ListTopicsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1092,7 +1394,7 @@ func (x *ListTopicsRequest) String() string {
func (*ListTopicsRequest) ProtoMessage() {}
func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1105,7 +1407,7 @@ func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead.
func (*ListTopicsRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11}
}
func (x *ListTopicsRequest) GetProject() string {
@@ -1145,7 +1447,7 @@ type ListTopicsResponse struct {
func (x *ListTopicsResponse) Reset() {
*x = ListTopicsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1158,7 +1460,7 @@ func (x *ListTopicsResponse) String() string {
func (*ListTopicsResponse) ProtoMessage() {}
func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1171,7 +1473,7 @@ func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead.
func (*ListTopicsResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12}
}
func (x *ListTopicsResponse) GetTopics() []*Topic {
@@ -1208,7 +1510,7 @@ type ListTopicSubscriptionsRequest struct {
func (x *ListTopicSubscriptionsRequest) Reset() {
*x = ListTopicSubscriptionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1221,7 +1523,7 @@ func (x *ListTopicSubscriptionsRequest) String() string {
func (*ListTopicSubscriptionsRequest) ProtoMessage() {}
func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1234,7 +1536,7 @@ func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicSubscriptionsRequest.ProtoReflect.Descriptor instead.
func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13}
}
func (x *ListTopicSubscriptionsRequest) GetTopic() string {
@@ -1276,7 +1578,7 @@ type ListTopicSubscriptionsResponse struct {
func (x *ListTopicSubscriptionsResponse) Reset() {
*x = ListTopicSubscriptionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1289,7 +1591,7 @@ func (x *ListTopicSubscriptionsResponse) String() string {
func (*ListTopicSubscriptionsResponse) ProtoMessage() {}
func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1302,7 +1604,7 @@ func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicSubscriptionsResponse.ProtoReflect.Descriptor instead.
func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14}
}
func (x *ListTopicSubscriptionsResponse) GetSubscriptions() []string {
@@ -1339,7 +1641,7 @@ type ListTopicSnapshotsRequest struct {
func (x *ListTopicSnapshotsRequest) Reset() {
*x = ListTopicSnapshotsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1352,7 +1654,7 @@ func (x *ListTopicSnapshotsRequest) String() string {
func (*ListTopicSnapshotsRequest) ProtoMessage() {}
func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1365,7 +1667,7 @@ func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicSnapshotsRequest.ProtoReflect.Descriptor instead.
func (*ListTopicSnapshotsRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15}
}
func (x *ListTopicSnapshotsRequest) GetTopic() string {
@@ -1406,7 +1708,7 @@ type ListTopicSnapshotsResponse struct {
func (x *ListTopicSnapshotsResponse) Reset() {
*x = ListTopicSnapshotsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1419,7 +1721,7 @@ func (x *ListTopicSnapshotsResponse) String() string {
func (*ListTopicSnapshotsResponse) ProtoMessage() {}
func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1432,7 +1734,7 @@ func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTopicSnapshotsResponse.ProtoReflect.Descriptor instead.
func (*ListTopicSnapshotsResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16}
}
func (x *ListTopicSnapshotsResponse) GetSnapshots() []string {
@@ -1463,7 +1765,7 @@ type DeleteTopicRequest struct {
func (x *DeleteTopicRequest) Reset() {
*x = DeleteTopicRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1476,7 +1778,7 @@ func (x *DeleteTopicRequest) String() string {
func (*DeleteTopicRequest) ProtoMessage() {}
func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1489,7 +1791,7 @@ func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead.
func (*DeleteTopicRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17}
}
func (x *DeleteTopicRequest) GetTopic() string {
@@ -1513,7 +1815,7 @@ type DetachSubscriptionRequest struct {
func (x *DetachSubscriptionRequest) Reset() {
*x = DetachSubscriptionRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1526,7 +1828,7 @@ func (x *DetachSubscriptionRequest) String() string {
func (*DetachSubscriptionRequest) ProtoMessage() {}
func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1539,7 +1841,7 @@ func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DetachSubscriptionRequest.ProtoReflect.Descriptor instead.
func (*DetachSubscriptionRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18}
}
func (x *DetachSubscriptionRequest) GetSubscription() string {
@@ -1560,7 +1862,7 @@ type DetachSubscriptionResponse struct {
func (x *DetachSubscriptionResponse) Reset() {
*x = DetachSubscriptionResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1573,7 +1875,7 @@ func (x *DetachSubscriptionResponse) String() string {
func (*DetachSubscriptionResponse) ProtoMessage() {}
func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1586,7 +1888,7 @@ func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use DetachSubscriptionResponse.ProtoReflect.Descriptor instead.
func (*DetachSubscriptionResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19}
}
// A subscription resource. If none of `push_config`, `bigquery_config`, or
@@ -1649,7 +1951,7 @@ type Subscription struct {
// backlog, from the moment a message is published. If `retain_acked_messages`
// is true, then this also configures the retention of acknowledged messages,
// and thus configures how far back in time a `Seek` can be done. Defaults to
- // 7 days. Cannot be more than 7 days or less than 10 minutes.
+ // 7 days. Cannot be more than 31 days or less than 10 minutes.
MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"`
// Optional. See [Creating and managing
// labels](https://cloud.google.com/pubsub/docs/labels).
@@ -1719,12 +2021,15 @@ type Subscription struct {
// Output only. An output-only field indicating whether or not the
// subscription can receive messages.
State Subscription_State `protobuf:"varint,19,opt,name=state,proto3,enum=google.pubsub.v1.Subscription_State" json:"state,omitempty"`
+ // Output only. Information about the associated Analytics Hub subscription.
+ // Only set if the subscritpion is created by Analytics Hub.
+ AnalyticsHubSubscriptionInfo *Subscription_AnalyticsHubSubscriptionInfo `protobuf:"bytes,23,opt,name=analytics_hub_subscription_info,json=analyticsHubSubscriptionInfo,proto3" json:"analytics_hub_subscription_info,omitempty"`
}
func (x *Subscription) Reset() {
*x = Subscription{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1737,7 +2042,7 @@ func (x *Subscription) String() string {
func (*Subscription) ProtoMessage() {}
func (x *Subscription) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1750,7 +2055,7 @@ func (x *Subscription) ProtoReflect() protoreflect.Message {
// Deprecated: Use Subscription.ProtoReflect.Descriptor instead.
func (*Subscription) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20}
}
func (x *Subscription) GetName() string {
@@ -1879,6 +2184,13 @@ func (x *Subscription) GetState() Subscription_State {
return Subscription_STATE_UNSPECIFIED
}
+func (x *Subscription) GetAnalyticsHubSubscriptionInfo() *Subscription_AnalyticsHubSubscriptionInfo {
+ if x != nil {
+ return x.AnalyticsHubSubscriptionInfo
+ }
+ return nil
+}
+
// A policy that specifies how Pub/Sub retries message delivery.
//
// Retry delay will be exponential based on provided minimum and maximum
@@ -1907,7 +2219,7 @@ type RetryPolicy struct {
func (x *RetryPolicy) Reset() {
*x = RetryPolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1920,7 +2232,7 @@ func (x *RetryPolicy) String() string {
func (*RetryPolicy) ProtoMessage() {}
func (x *RetryPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1933,7 +2245,7 @@ func (x *RetryPolicy) ProtoReflect() protoreflect.Message {
// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead.
func (*RetryPolicy) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21}
}
func (x *RetryPolicy) GetMinimumBackoff() *durationpb.Duration {
@@ -1989,7 +2301,7 @@ type DeadLetterPolicy struct {
func (x *DeadLetterPolicy) Reset() {
*x = DeadLetterPolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2002,7 +2314,7 @@ func (x *DeadLetterPolicy) String() string {
func (*DeadLetterPolicy) ProtoMessage() {}
func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2015,7 +2327,7 @@ func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeadLetterPolicy.ProtoReflect.Descriptor instead.
func (*DeadLetterPolicy) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22}
}
func (x *DeadLetterPolicy) GetDeadLetterTopic() string {
@@ -2051,7 +2363,7 @@ type ExpirationPolicy struct {
func (x *ExpirationPolicy) Reset() {
*x = ExpirationPolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2064,7 +2376,7 @@ func (x *ExpirationPolicy) String() string {
func (*ExpirationPolicy) ProtoMessage() {}
func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2077,7 +2389,7 @@ func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExpirationPolicy.ProtoReflect.Descriptor instead.
func (*ExpirationPolicy) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23}
}
func (x *ExpirationPolicy) GetTtl() *durationpb.Duration {
@@ -2141,7 +2453,7 @@ type PushConfig struct {
func (x *PushConfig) Reset() {
*x = PushConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2154,7 +2466,7 @@ func (x *PushConfig) String() string {
func (*PushConfig) ProtoMessage() {}
func (x *PushConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2167,7 +2479,7 @@ func (x *PushConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use PushConfig.ProtoReflect.Descriptor instead.
func (*PushConfig) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24}
}
func (x *PushConfig) GetPushEndpoint() string {
@@ -2296,7 +2608,7 @@ type BigQueryConfig struct {
func (x *BigQueryConfig) Reset() {
*x = BigQueryConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2309,7 +2621,7 @@ func (x *BigQueryConfig) String() string {
func (*BigQueryConfig) ProtoMessage() {}
func (x *BigQueryConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2322,7 +2634,7 @@ func (x *BigQueryConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use BigQueryConfig.ProtoReflect.Descriptor instead.
func (*BigQueryConfig) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25}
}
func (x *BigQueryConfig) GetTable() string {
@@ -2429,7 +2741,7 @@ type CloudStorageConfig struct {
func (x *CloudStorageConfig) Reset() {
*x = CloudStorageConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2442,7 +2754,7 @@ func (x *CloudStorageConfig) String() string {
func (*CloudStorageConfig) ProtoMessage() {}
func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2455,7 +2767,7 @@ func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use CloudStorageConfig.ProtoReflect.Descriptor instead.
func (*CloudStorageConfig) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26}
}
func (x *CloudStorageConfig) GetBucket() string {
@@ -2594,7 +2906,7 @@ type ReceivedMessage struct {
func (x *ReceivedMessage) Reset() {
*x = ReceivedMessage{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2607,7 +2919,7 @@ func (x *ReceivedMessage) String() string {
func (*ReceivedMessage) ProtoMessage() {}
func (x *ReceivedMessage) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2620,7 +2932,7 @@ func (x *ReceivedMessage) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead.
func (*ReceivedMessage) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27}
}
func (x *ReceivedMessage) GetAckId() string {
@@ -2658,7 +2970,7 @@ type GetSubscriptionRequest struct {
func (x *GetSubscriptionRequest) Reset() {
*x = GetSubscriptionRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2671,7 +2983,7 @@ func (x *GetSubscriptionRequest) String() string {
func (*GetSubscriptionRequest) ProtoMessage() {}
func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2684,7 +2996,7 @@ func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead.
func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28}
}
func (x *GetSubscriptionRequest) GetSubscription() string {
@@ -2710,7 +3022,7 @@ type UpdateSubscriptionRequest struct {
func (x *UpdateSubscriptionRequest) Reset() {
*x = UpdateSubscriptionRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2723,7 +3035,7 @@ func (x *UpdateSubscriptionRequest) String() string {
func (*UpdateSubscriptionRequest) ProtoMessage() {}
func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2736,7 +3048,7 @@ func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead.
func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29}
}
func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription {
@@ -2773,7 +3085,7 @@ type ListSubscriptionsRequest struct {
func (x *ListSubscriptionsRequest) Reset() {
*x = ListSubscriptionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2786,7 +3098,7 @@ func (x *ListSubscriptionsRequest) String() string {
func (*ListSubscriptionsRequest) ProtoMessage() {}
func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2799,7 +3111,7 @@ func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead.
func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30}
}
func (x *ListSubscriptionsRequest) GetProject() string {
@@ -2840,7 +3152,7 @@ type ListSubscriptionsResponse struct {
func (x *ListSubscriptionsResponse) Reset() {
*x = ListSubscriptionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2853,7 +3165,7 @@ func (x *ListSubscriptionsResponse) String() string {
func (*ListSubscriptionsResponse) ProtoMessage() {}
func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2866,7 +3178,7 @@ func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead.
func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31}
}
func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription {
@@ -2897,7 +3209,7 @@ type DeleteSubscriptionRequest struct {
func (x *DeleteSubscriptionRequest) Reset() {
*x = DeleteSubscriptionRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2910,7 +3222,7 @@ func (x *DeleteSubscriptionRequest) String() string {
func (*DeleteSubscriptionRequest) ProtoMessage() {}
func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2923,7 +3235,7 @@ func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead.
func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32}
}
func (x *DeleteSubscriptionRequest) GetSubscription() string {
@@ -2954,7 +3266,7 @@ type ModifyPushConfigRequest struct {
func (x *ModifyPushConfigRequest) Reset() {
*x = ModifyPushConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2967,7 +3279,7 @@ func (x *ModifyPushConfigRequest) String() string {
func (*ModifyPushConfigRequest) ProtoMessage() {}
func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2980,7 +3292,7 @@ func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead.
func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33}
}
func (x *ModifyPushConfigRequest) GetSubscription() string {
@@ -3025,7 +3337,7 @@ type PullRequest struct {
func (x *PullRequest) Reset() {
*x = PullRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3038,7 +3350,7 @@ func (x *PullRequest) String() string {
func (*PullRequest) ProtoMessage() {}
func (x *PullRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3051,7 +3363,7 @@ func (x *PullRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PullRequest.ProtoReflect.Descriptor instead.
func (*PullRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34}
}
func (x *PullRequest) GetSubscription() string {
@@ -3093,7 +3405,7 @@ type PullResponse struct {
func (x *PullResponse) Reset() {
*x = PullResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3106,7 +3418,7 @@ func (x *PullResponse) String() string {
func (*PullResponse) ProtoMessage() {}
func (x *PullResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3119,7 +3431,7 @@ func (x *PullResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PullResponse.ProtoReflect.Descriptor instead.
func (*PullResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35}
}
func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage {
@@ -3155,7 +3467,7 @@ type ModifyAckDeadlineRequest struct {
func (x *ModifyAckDeadlineRequest) Reset() {
*x = ModifyAckDeadlineRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3168,7 +3480,7 @@ func (x *ModifyAckDeadlineRequest) String() string {
func (*ModifyAckDeadlineRequest) ProtoMessage() {}
func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3181,7 +3493,7 @@ func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead.
func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36}
}
func (x *ModifyAckDeadlineRequest) GetSubscription() string {
@@ -3223,7 +3535,7 @@ type AcknowledgeRequest struct {
func (x *AcknowledgeRequest) Reset() {
*x = AcknowledgeRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3236,7 +3548,7 @@ func (x *AcknowledgeRequest) String() string {
func (*AcknowledgeRequest) ProtoMessage() {}
func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3249,7 +3561,7 @@ func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead.
func (*AcknowledgeRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37}
}
func (x *AcknowledgeRequest) GetSubscription() string {
@@ -3341,7 +3653,7 @@ type StreamingPullRequest struct {
func (x *StreamingPullRequest) Reset() {
*x = StreamingPullRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3354,7 +3666,7 @@ func (x *StreamingPullRequest) String() string {
func (*StreamingPullRequest) ProtoMessage() {}
func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3367,7 +3679,7 @@ func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead.
func (*StreamingPullRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38}
}
func (x *StreamingPullRequest) GetSubscription() string {
@@ -3448,7 +3760,7 @@ type StreamingPullResponse struct {
func (x *StreamingPullResponse) Reset() {
*x = StreamingPullResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3461,7 +3773,7 @@ func (x *StreamingPullResponse) String() string {
func (*StreamingPullResponse) ProtoMessage() {}
func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3474,7 +3786,7 @@ func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead.
func (*StreamingPullResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39}
}
func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage {
@@ -3538,7 +3850,7 @@ type CreateSnapshotRequest struct {
func (x *CreateSnapshotRequest) Reset() {
*x = CreateSnapshotRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3551,7 +3863,7 @@ func (x *CreateSnapshotRequest) String() string {
func (*CreateSnapshotRequest) ProtoMessage() {}
func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3564,7 +3876,7 @@ func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead.
func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40}
}
func (x *CreateSnapshotRequest) GetName() string {
@@ -3604,7 +3916,7 @@ type UpdateSnapshotRequest struct {
func (x *UpdateSnapshotRequest) Reset() {
*x = UpdateSnapshotRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3617,7 +3929,7 @@ func (x *UpdateSnapshotRequest) String() string {
func (*UpdateSnapshotRequest) ProtoMessage() {}
func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3630,7 +3942,7 @@ func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead.
func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41}
}
func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot {
@@ -3681,7 +3993,7 @@ type Snapshot struct {
func (x *Snapshot) Reset() {
*x = Snapshot{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3694,7 +4006,7 @@ func (x *Snapshot) String() string {
func (*Snapshot) ProtoMessage() {}
func (x *Snapshot) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3707,7 +4019,7 @@ func (x *Snapshot) ProtoReflect() protoreflect.Message {
// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead.
func (*Snapshot) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42}
}
func (x *Snapshot) GetName() string {
@@ -3752,7 +4064,7 @@ type GetSnapshotRequest struct {
func (x *GetSnapshotRequest) Reset() {
*x = GetSnapshotRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3765,7 +4077,7 @@ func (x *GetSnapshotRequest) String() string {
func (*GetSnapshotRequest) ProtoMessage() {}
func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3778,7 +4090,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead.
func (*GetSnapshotRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43}
}
func (x *GetSnapshotRequest) GetSnapshot() string {
@@ -3808,7 +4120,7 @@ type ListSnapshotsRequest struct {
func (x *ListSnapshotsRequest) Reset() {
*x = ListSnapshotsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3821,7 +4133,7 @@ func (x *ListSnapshotsRequest) String() string {
func (*ListSnapshotsRequest) ProtoMessage() {}
func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3834,7 +4146,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead.
func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44}
}
func (x *ListSnapshotsRequest) GetProject() string {
@@ -3875,7 +4187,7 @@ type ListSnapshotsResponse struct {
func (x *ListSnapshotsResponse) Reset() {
*x = ListSnapshotsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3888,7 +4200,7 @@ func (x *ListSnapshotsResponse) String() string {
func (*ListSnapshotsResponse) ProtoMessage() {}
func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3901,7 +4213,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead.
func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45}
}
func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot {
@@ -3932,7 +4244,7 @@ type DeleteSnapshotRequest struct {
func (x *DeleteSnapshotRequest) Reset() {
*x = DeleteSnapshotRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3945,7 +4257,7 @@ func (x *DeleteSnapshotRequest) String() string {
func (*DeleteSnapshotRequest) ProtoMessage() {}
func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3958,7 +4270,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead.
func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46}
}
func (x *DeleteSnapshotRequest) GetSnapshot() string {
@@ -3986,7 +4298,7 @@ type SeekRequest struct {
func (x *SeekRequest) Reset() {
*x = SeekRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3999,7 +4311,7 @@ func (x *SeekRequest) String() string {
func (*SeekRequest) ProtoMessage() {}
func (x *SeekRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4012,7 +4324,7 @@ func (x *SeekRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead.
func (*SeekRequest) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{47}
}
func (x *SeekRequest) GetSubscription() string {
@@ -4083,7 +4395,7 @@ type SeekResponse struct {
func (x *SeekResponse) Reset() {
*x = SeekResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4096,7 +4408,7 @@ func (x *SeekResponse) String() string {
func (*SeekResponse) ProtoMessage() {}
func (x *SeekResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4109,7 +4421,7 @@ func (x *SeekResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead.
func (*SeekResponse) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{48}
}
// Ingestion settings for Amazon Kinesis Data Streams.
@@ -4140,7 +4452,7 @@ type IngestionDataSourceSettings_AwsKinesis struct {
func (x *IngestionDataSourceSettings_AwsKinesis) Reset() {
*x = IngestionDataSourceSettings_AwsKinesis{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4153,7 +4465,7 @@ func (x *IngestionDataSourceSettings_AwsKinesis) String() string {
func (*IngestionDataSourceSettings_AwsKinesis) ProtoMessage() {}
func (x *IngestionDataSourceSettings_AwsKinesis) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4204,47 +4516,53 @@ func (x *IngestionDataSourceSettings_AwsKinesis) GetGcpServiceAccount() string {
return ""
}
-// Contains information needed for generating an
-// [OpenID Connect
-// token](https://developers.google.com/identity/protocols/OpenIDConnect).
-type PushConfig_OidcToken struct {
+// Ingestion settings for Cloud Storage.
+type IngestionDataSourceSettings_CloudStorage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Optional. [Service account
- // email](https://cloud.google.com/iam/docs/service-accounts)
- // used for generating the OIDC token. For more information
- // on setting up authentication, see
- // [Push subscriptions](https://cloud.google.com/pubsub/docs/push).
- ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // Optional. Audience to be used when generating OIDC token. The audience
- // claim identifies the recipients that the JWT is intended for. The
- // audience value is a single case-sensitive string. Having multiple values
- // (array) for the audience field is not supported. More info about the OIDC
- // JWT token audience here:
- // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified,
- // the Push endpoint URL will be used.
- Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"`
-}
-
-func (x *PushConfig_OidcToken) Reset() {
- *x = PushConfig_OidcToken{}
+ // Output only. An output-only field that indicates the state of the Cloud
+ // Storage ingestion source.
+ State IngestionDataSourceSettings_CloudStorage_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_CloudStorage_State" json:"state,omitempty"`
+ // Optional. Cloud Storage bucket. The bucket name must be without any
+ // prefix like "gs://". See the [bucket naming requirements]
+ // (https://cloud.google.com/storage/docs/buckets#naming).
+ Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Defaults to text format.
+ //
+ // Types that are assignable to InputFormat:
+ //
+ // *IngestionDataSourceSettings_CloudStorage_TextFormat_
+ // *IngestionDataSourceSettings_CloudStorage_AvroFormat_
+ // *IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat
+ InputFormat isIngestionDataSourceSettings_CloudStorage_InputFormat `protobuf_oneof:"input_format"`
+ // Optional. Only objects with a larger or equal creation timestamp will be
+ // ingested.
+ MinimumObjectCreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=minimum_object_create_time,json=minimumObjectCreateTime,proto3" json:"minimum_object_create_time,omitempty"`
+ // Optional. Glob pattern used to match objects that will be ingested. If
+ // unset, all objects will be ingested. See the [supported
+ // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob).
+ MatchGlob string `protobuf:"bytes,9,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage) Reset() {
+ *x = IngestionDataSourceSettings_CloudStorage{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *PushConfig_OidcToken) String() string {
+func (x *IngestionDataSourceSettings_CloudStorage) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*PushConfig_OidcToken) ProtoMessage() {}
+func (*IngestionDataSourceSettings_CloudStorage) ProtoMessage() {}
-func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51]
+func (x *IngestionDataSourceSettings_CloudStorage) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4255,51 +4573,594 @@ func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead.
-func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0}
+// Deprecated: Use IngestionDataSourceSettings_CloudStorage.ProtoReflect.Descriptor instead.
+func (*IngestionDataSourceSettings_CloudStorage) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1}
}
-func (x *PushConfig_OidcToken) GetServiceAccountEmail() string {
+func (x *IngestionDataSourceSettings_CloudStorage) GetState() IngestionDataSourceSettings_CloudStorage_State {
if x != nil {
- return x.ServiceAccountEmail
+ return x.State
}
- return ""
+ return IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED
}
-func (x *PushConfig_OidcToken) GetAudience() string {
+func (x *IngestionDataSourceSettings_CloudStorage) GetBucket() string {
if x != nil {
- return x.Audience
+ return x.Bucket
}
return ""
}
-// The payload to the push endpoint is in the form of the JSON representation
-// of a PubsubMessage
-// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage).
-type PushConfig_PubsubWrapper struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+func (m *IngestionDataSourceSettings_CloudStorage) GetInputFormat() isIngestionDataSourceSettings_CloudStorage_InputFormat {
+ if m != nil {
+ return m.InputFormat
+ }
+ return nil
}
-func (x *PushConfig_PubsubWrapper) Reset() {
- *x = PushConfig_PubsubWrapper{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *IngestionDataSourceSettings_CloudStorage) GetTextFormat() *IngestionDataSourceSettings_CloudStorage_TextFormat {
+ if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_TextFormat_); ok {
+ return x.TextFormat
}
+ return nil
}
-func (x *PushConfig_PubsubWrapper) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *IngestionDataSourceSettings_CloudStorage) GetAvroFormat() *IngestionDataSourceSettings_CloudStorage_AvroFormat {
+ if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_AvroFormat_); ok {
+ return x.AvroFormat
+ }
+ return nil
}
-func (*PushConfig_PubsubWrapper) ProtoMessage() {}
-
+func (x *IngestionDataSourceSettings_CloudStorage) GetPubsubAvroFormat() *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat {
+ if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat); ok {
+ return x.PubsubAvroFormat
+ }
+ return nil
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage) GetMinimumObjectCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.MinimumObjectCreateTime
+ }
+ return nil
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage) GetMatchGlob() string {
+ if x != nil {
+ return x.MatchGlob
+ }
+ return ""
+}
+
+type isIngestionDataSourceSettings_CloudStorage_InputFormat interface {
+ isIngestionDataSourceSettings_CloudStorage_InputFormat()
+}
+
+type IngestionDataSourceSettings_CloudStorage_TextFormat_ struct {
+ // Optional. Data from Cloud Storage will be interpreted as text.
+ TextFormat *IngestionDataSourceSettings_CloudStorage_TextFormat `protobuf:"bytes,3,opt,name=text_format,json=textFormat,proto3,oneof"`
+}
+
+type IngestionDataSourceSettings_CloudStorage_AvroFormat_ struct {
+ // Optional. Data from Cloud Storage will be interpreted in Avro format.
+ AvroFormat *IngestionDataSourceSettings_CloudStorage_AvroFormat `protobuf:"bytes,4,opt,name=avro_format,json=avroFormat,proto3,oneof"`
+}
+
+type IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat struct {
+ // Optional. It will be assumed data from Cloud Storage was written via
+ // [Cloud Storage
+ // subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage).
+ PubsubAvroFormat *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat `protobuf:"bytes,5,opt,name=pubsub_avro_format,json=pubsubAvroFormat,proto3,oneof"`
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_TextFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() {
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_AvroFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() {
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat) isIngestionDataSourceSettings_CloudStorage_InputFormat() {
+}
+
+// Configuration for reading Cloud Storage data in text format. Each line of
+// text as specified by the delimiter will be set to the `data` field of a
+// Pub/Sub message.
+type IngestionDataSourceSettings_CloudStorage_TextFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. When unset, '\n' is used.
+ Delimiter *string `protobuf:"bytes,1,opt,name=delimiter,proto3,oneof" json:"delimiter,omitempty"`
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) Reset() {
+ *x = IngestionDataSourceSettings_CloudStorage_TextFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoMessage() {}
+
+func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionDataSourceSettings_CloudStorage_TextFormat.ProtoReflect.Descriptor instead.
+func (*IngestionDataSourceSettings_CloudStorage_TextFormat) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) GetDelimiter() string {
+ if x != nil && x.Delimiter != nil {
+ return *x.Delimiter
+ }
+ return ""
+}
+
+// Configuration for reading Cloud Storage data in Avro binary format. The
+// bytes of each object will be set to the `data` field of a Pub/Sub
+// message.
+type IngestionDataSourceSettings_CloudStorage_AvroFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) Reset() {
+ *x = IngestionDataSourceSettings_CloudStorage_AvroFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoMessage() {}
+
+func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionDataSourceSettings_CloudStorage_AvroFormat.ProtoReflect.Descriptor instead.
+func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 1}
+}
+
+// Configuration for reading Cloud Storage data written via [Cloud Storage
+// subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). The
+// data and attributes fields of the originally exported Pub/Sub message
+// will be restored when publishing.
+type IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Reset() {
+ *x = IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoMessage() {}
+
+func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat.ProtoReflect.Descriptor instead.
+func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 2}
+}
+
+// Specifies the reason why some data may have been left out of
+// the desired Pub/Sub message due to the API message limits
+// (https://cloud.google.com/pubsub/quotas#resource_limits). For example,
+// when the number of attributes is larger than 100, the number of
+// attributes is truncated to 100 to respect the limit on the attribute count.
+// Other attribute limits are treated similarly. When the size of the desired
+// message would've been larger than 10MB, the message won't be published at
+// all, and ingestion of the subsequent messages will proceed as normal.
+type IngestionFailureEvent_ApiViolationReason struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *IngestionFailureEvent_ApiViolationReason) Reset() {
+ *x = IngestionFailureEvent_ApiViolationReason{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionFailureEvent_ApiViolationReason) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_ApiViolationReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_ApiViolationReason) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_ApiViolationReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_ApiViolationReason) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// Set when an Avro file is unsupported or its format is not valid. When this
+// occurs, one or more Avro objects won't be ingested.
+type IngestionFailureEvent_AvroFailureReason struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *IngestionFailureEvent_AvroFailureReason) Reset() {
+ *x = IngestionFailureEvent_AvroFailureReason{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionFailureEvent_AvroFailureReason) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_AvroFailureReason) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_AvroFailureReason) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_AvroFailureReason.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_AvroFailureReason) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 1}
+}
+
+// Failure when ingesting from a Cloud Storage source.
+type IngestionFailureEvent_CloudStorageFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. Name of the Cloud Storage bucket used for ingestion.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Optional. Name of the Cloud Storage object which contained the section
+ // that couldn't be ingested.
+ ObjectName string `protobuf:"bytes,2,opt,name=object_name,json=objectName,proto3" json:"object_name,omitempty"`
+ // Optional. Generation of the Cloud Storage object which contained the
+ // section that couldn't be ingested.
+ ObjectGeneration int64 `protobuf:"varint,3,opt,name=object_generation,json=objectGeneration,proto3" json:"object_generation,omitempty"`
+ // Reason why ingestion failed for the specified object.
+ //
+ // Types that are assignable to Reason:
+ //
+ // *IngestionFailureEvent_CloudStorageFailure_AvroFailureReason
+ // *IngestionFailureEvent_CloudStorageFailure_ApiViolationReason
+ Reason isIngestionFailureEvent_CloudStorageFailure_Reason `protobuf_oneof:"reason"`
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) Reset() {
+ *x = IngestionFailureEvent_CloudStorageFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure) ProtoMessage() {}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IngestionFailureEvent_CloudStorageFailure.ProtoReflect.Descriptor instead.
+func (*IngestionFailureEvent_CloudStorageFailure) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 2}
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetBucket() string {
+ if x != nil {
+ return x.Bucket
+ }
+ return ""
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectName() string {
+ if x != nil {
+ return x.ObjectName
+ }
+ return ""
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectGeneration() int64 {
+ if x != nil {
+ return x.ObjectGeneration
+ }
+ return 0
+}
+
+func (m *IngestionFailureEvent_CloudStorageFailure) GetReason() isIngestionFailureEvent_CloudStorageFailure_Reason {
+ if m != nil {
+ return m.Reason
+ }
+ return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetAvroFailureReason() *IngestionFailureEvent_AvroFailureReason {
+ if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason); ok {
+ return x.AvroFailureReason
+ }
+ return nil
+}
+
+func (x *IngestionFailureEvent_CloudStorageFailure) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason {
+ if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason); ok {
+ return x.ApiViolationReason
+ }
+ return nil
+}
+
+type isIngestionFailureEvent_CloudStorageFailure_Reason interface {
+ isIngestionFailureEvent_CloudStorageFailure_Reason()
+}
+
+type IngestionFailureEvent_CloudStorageFailure_AvroFailureReason struct {
+ // Optional. Failure encountered when parsing an Avro file.
+ AvroFailureReason *IngestionFailureEvent_AvroFailureReason `protobuf:"bytes,5,opt,name=avro_failure_reason,json=avroFailureReason,proto3,oneof"`
+}
+
+type IngestionFailureEvent_CloudStorageFailure_ApiViolationReason struct {
+ // Optional. The Pub/Sub API limits prevented the desired message from
+ // being published.
+ ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,6,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"`
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
+func (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason) isIngestionFailureEvent_CloudStorageFailure_Reason() {
+}
+
+// Information about an associated Analytics Hub subscription
+// (https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions).
+type Subscription_AnalyticsHubSubscriptionInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. The name of the associated Analytics Hub listing resource.
+ // Pattern:
+ // "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}"
+ Listing string `protobuf:"bytes,1,opt,name=listing,proto3" json:"listing,omitempty"`
+ // Optional. The name of the associated Analytics Hub subscription resource.
+ // Pattern:
+ // "projects/{project}/locations/{location}/subscriptions/{subscription}"
+ Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"`
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) Reset() {
+ *x = Subscription_AnalyticsHubSubscriptionInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Subscription_AnalyticsHubSubscriptionInfo) ProtoMessage() {}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Subscription_AnalyticsHubSubscriptionInfo.ProtoReflect.Descriptor instead.
+func (*Subscription_AnalyticsHubSubscriptionInfo) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) GetListing() string {
+ if x != nil {
+ return x.Listing
+ }
+ return ""
+}
+
+func (x *Subscription_AnalyticsHubSubscriptionInfo) GetSubscription() string {
+ if x != nil {
+ return x.Subscription
+ }
+ return ""
+}
+
+// Contains information needed for generating an
+// [OpenID Connect
+// token](https://developers.google.com/identity/protocols/OpenIDConnect).
+type PushConfig_OidcToken struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. [Service account
+ // email](https://cloud.google.com/iam/docs/service-accounts)
+ // used for generating the OIDC token. For more information
+ // on setting up authentication, see
+ // [Push subscriptions](https://cloud.google.com/pubsub/docs/push).
+ ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ // Optional. Audience to be used when generating OIDC token. The audience
+ // claim identifies the recipients that the JWT is intended for. The
+ // audience value is a single case-sensitive string. Having multiple values
+ // (array) for the audience field is not supported. More info about the OIDC
+ // JWT token audience here:
+ // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified,
+ // the Push endpoint URL will be used.
+ Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"`
+}
+
+func (x *PushConfig_OidcToken) Reset() {
+ *x = PushConfig_OidcToken{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PushConfig_OidcToken) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PushConfig_OidcToken) ProtoMessage() {}
+
+func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead.
+func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) {
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0}
+}
+
+func (x *PushConfig_OidcToken) GetServiceAccountEmail() string {
+ if x != nil {
+ return x.ServiceAccountEmail
+ }
+ return ""
+}
+
+func (x *PushConfig_OidcToken) GetAudience() string {
+ if x != nil {
+ return x.Audience
+ }
+ return ""
+}
+
+// The payload to the push endpoint is in the form of the JSON representation
+// of a PubsubMessage
+// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage).
+type PushConfig_PubsubWrapper struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *PushConfig_PubsubWrapper) Reset() {
+ *x = PushConfig_PubsubWrapper{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PushConfig_PubsubWrapper) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PushConfig_PubsubWrapper) ProtoMessage() {}
+
func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4312,7 +5173,7 @@ func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message {
// Deprecated: Use PushConfig_PubsubWrapper.ProtoReflect.Descriptor instead.
func (*PushConfig_PubsubWrapper) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 1}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 1}
}
// Sets the `data` field as the HTTP body for delivery.
@@ -4330,7 +5191,7 @@ type PushConfig_NoWrapper struct {
func (x *PushConfig_NoWrapper) Reset() {
*x = PushConfig_NoWrapper{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4343,7 +5204,7 @@ func (x *PushConfig_NoWrapper) String() string {
func (*PushConfig_NoWrapper) ProtoMessage() {}
func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4356,7 +5217,7 @@ func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message {
// Deprecated: Use PushConfig_NoWrapper.ProtoReflect.Descriptor instead.
func (*PushConfig_NoWrapper) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 2}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 2}
}
func (x *PushConfig_NoWrapper) GetWriteMetadata() bool {
@@ -4378,7 +5239,7 @@ type CloudStorageConfig_TextConfig struct {
func (x *CloudStorageConfig_TextConfig) Reset() {
*x = CloudStorageConfig_TextConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4391,7 +5252,7 @@ func (x *CloudStorageConfig_TextConfig) String() string {
func (*CloudStorageConfig_TextConfig) ProtoMessage() {}
func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4404,7 +5265,7 @@ func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use CloudStorageConfig_TextConfig.ProtoReflect.Descriptor instead.
func (*CloudStorageConfig_TextConfig) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 0}
}
// Configuration for writing message data in Avro format.
@@ -4429,7 +5290,7 @@ type CloudStorageConfig_AvroConfig struct {
func (x *CloudStorageConfig_AvroConfig) Reset() {
*x = CloudStorageConfig_AvroConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4442,7 +5303,7 @@ func (x *CloudStorageConfig_AvroConfig) String() string {
func (*CloudStorageConfig_AvroConfig) ProtoMessage() {}
func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4455,7 +5316,7 @@ func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use CloudStorageConfig_AvroConfig.ProtoReflect.Descriptor instead.
func (*CloudStorageConfig_AvroConfig) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 1}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 1}
}
func (x *CloudStorageConfig_AvroConfig) GetWriteMetadata() bool {
@@ -4494,7 +5355,7 @@ type StreamingPullResponse_AcknowledgeConfirmation struct {
func (x *StreamingPullResponse_AcknowledgeConfirmation) Reset() {
*x = StreamingPullResponse_AcknowledgeConfirmation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4507,7 +5368,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) String() string {
func (*StreamingPullResponse_AcknowledgeConfirmation) ProtoMessage() {}
func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4520,7 +5381,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protorefl
// Deprecated: Use StreamingPullResponse_AcknowledgeConfirmation.ProtoReflect.Descriptor instead.
func (*StreamingPullResponse_AcknowledgeConfirmation) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 0}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 0}
}
func (x *StreamingPullResponse_AcknowledgeConfirmation) GetAckIds() []string {
@@ -4571,7 +5432,7 @@ type StreamingPullResponse_ModifyAckDeadlineConfirmation struct {
func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) Reset() {
*x = StreamingPullResponse_ModifyAckDeadlineConfirmation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4584,7 +5445,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) String() string {
func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoMessage() {}
func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4597,7 +5458,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() pro
// Deprecated: Use StreamingPullResponse_ModifyAckDeadlineConfirmation.ProtoReflect.Descriptor instead.
func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 1}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 1}
}
func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetAckIds() []string {
@@ -4637,7 +5498,7 @@ type StreamingPullResponse_SubscriptionProperties struct {
func (x *StreamingPullResponse_SubscriptionProperties) Reset() {
*x = StreamingPullResponse_SubscriptionProperties{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4650,7 +5511,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) String() string {
func (*StreamingPullResponse_SubscriptionProperties) ProtoMessage() {}
func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protoreflect.Message {
- mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59]
+ mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4663,7 +5524,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protorefle
// Deprecated: Use StreamingPullResponse_SubscriptionProperties.ProtoReflect.Descriptor instead.
func (*StreamingPullResponse_SubscriptionProperties) Descriptor() ([]byte, []int) {
- return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 2}
+ return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 2}
}
func (x *StreamingPullResponse_SubscriptionProperties) GetExactlyOnceDeliveryEnabled() bool {
@@ -4727,7 +5588,7 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{
0x6e, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69,
0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
0x41, 0x01, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x49, 0x64, 0x22, 0xb4, 0x04, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e,
+ 0x49, 0x64, 0x22, 0x80, 0x0d, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e,
0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
0x67, 0x73, 0x12, 0x60, 0x0a, 0x0b, 0x61, 0x77, 0x73, 0x5f, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x69,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
@@ -4735,1021 +5596,1158 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{
0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65,
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69,
0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x4b, 0x69, 0x6e,
- 0x65, 0x73, 0x69, 0x73, 0x1a, 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65,
- 0x73, 0x69, 0x73, 0x12, 0x59, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44,
- 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22,
- 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41,
- 0x72, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61,
- 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63,
- 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77,
- 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72,
- 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56,
- 0x45, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50,
- 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44,
- 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45,
- 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10,
- 0x03, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f,
- 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55,
- 0x4d, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x42,
- 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xd2, 0x06, 0x0a, 0x05, 0x54, 0x6f,
- 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x06,
- 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x65, 0x73, 0x69, 0x73, 0x12, 0x66, 0x0a, 0x0d, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49,
+ 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0c,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x61, 0x0a, 0x16,
+ 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x73, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
- 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x61,
- 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
- 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x6d,
- 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69,
- 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50,
- 0x7a, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65,
- 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x69, 0x6e,
- 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44,
- 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f,
- 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48,
- 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a,
- 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x49, 0x4e,
- 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
- 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x54, 0xea, 0x41, 0x51, 0x0a, 0x1b, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74,
- 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x12, 0x0f, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x22, 0xc3,
- 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x54, 0x0a, 0x0a, 0x61, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
+ 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x70, 0x6c, 0x61, 0x74, 0x66,
+ 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x1a,
+ 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x12, 0x59,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12,
- 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x3d,
- 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a,
- 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69,
- 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
- 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70,
- 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69,
- 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a,
- 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22,
- 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73,
+ 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x72, 0x6e, 0x12, 0x26, 0x0a,
+ 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
+ 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, 0x73, 0x5f, 0x72, 0x6f, 0x6c,
+ 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x13,
+ 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11,
+ 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
+ 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1d,
+ 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53,
+ 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a,
+ 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10,
+ 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44,
+ 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x5f, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x1a, 0xfe, 0x06, 0x0a, 0x0c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x5b, 0x0a, 0x05, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67,
+ 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x80, 0x01, 0x0a, 0x12, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x61,
+ 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74,
+ 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x75, 0x62,
+ 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x48, 0x00, 0x52, 0x10, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75,
+ 0x6d, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x6d, 0x69, 0x6e,
+ 0x69, 0x6d, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c,
+ 0x6f, 0x62, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x1a, 0x42, 0x0a, 0x0a, 0x54, 0x65, 0x78, 0x74,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x26, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00,
+ 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c,
+ 0x0a, 0x0a, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x0c, 0x0a, 0x0a,
+ 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x12, 0x0a, 0x10, 0x50, 0x75,
+ 0x62, 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9a,
+ 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54,
+ 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x43,
+ 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x50, 0x45, 0x52,
+ 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02,
+ 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d,
+ 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12,
+ 0x14, 0x0a, 0x10, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f,
+ 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, 0x4e,
+ 0x59, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x10, 0x05, 0x42, 0x0e, 0x0a, 0x0c, 0x69,
+ 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x50,
+ 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79,
+ 0x22, 0x5f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14,
+ 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c,
+ 0x45, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x02, 0x12,
+ 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52,
+ 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10,
+ 0x05, 0x22, 0x88, 0x05, 0x0a, 0x15, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+ 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x74,
+ 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x76, 0x0a, 0x15, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c,
+ 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x1a, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x56,
+ 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x13,
+ 0x0a, 0x11, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61,
+ 0x73, 0x6f, 0x6e, 0x1a, 0xfb, 0x02, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30,
+ 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x70, 0x0a, 0x13, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
+ 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
+ 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+ 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75,
+ 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52,
+ 0x11, 0x61, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x12, 0x73, 0x0a, 0x14, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69,
+ 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f,
+ 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x48, 0x00, 0x52, 0x12, 0x61, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f,
+ 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x22, 0xd2, 0x06, 0x0a,
+ 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x40, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73,
+ 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69,
+ 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x77, 0x0a,
+ 0x1e, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69,
+ 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x48, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54,
+ 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a,
+ 0x18, 0x49, 0x4e, 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55,
+ 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x54, 0xea, 0x41, 0x51,
+ 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d,
+ 0x12, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x5f, 0x22, 0xc3, 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x54, 0x0a, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49,
+ 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x26, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6f, 0x72, 0x64,
+ 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f,
+ 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61,
+ 0x73, 0x6b, 0x22, 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x73, 0x22, 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x11,
+ 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f,
+ 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0xa0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41,
+ 0x01, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75,
0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a,
- 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22,
- 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73,
- 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d,
- 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a,
+ 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a,
0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12,
0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70,
- 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12,
- 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e,
- 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x01, 0x0a,
- 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39,
- 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70,
- 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70,
- 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
- 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x01, 0xfa, 0x41,
- 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x6b, 0x65, 0x6e, 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f,
- 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61,
- 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21,
- 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
- 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4f,
- 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62,
- 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22,
- 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c,
- 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
- 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a,
- 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x0b, 0x0a, 0x0c, 0x53,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62,
+ 0x6e, 0x22, 0x4f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70,
+ 0x69, 0x63, 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x1c, 0x0a, 0x1a, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x0d,
+ 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70,
+ 0x69, 0x63, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c,
+ 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c,
+ 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x72, 0x65,
+ 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13,
+ 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72,
+ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x47, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64,
+ 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
+ 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1b, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x12, 0x64, 0x65,
+ 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65,
+ 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, 0x65, 0x74,
+ 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61,
+ 0x63, 0x68, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x1c, 0x65, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65,
+ 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x61, 0x63,
+ 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x12,
+ 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+ 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x1f, 0x61, 0x6e,
+ 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x5f, 0x68, 0x75, 0x62, 0x5f, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x17, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
+ 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x48, 0x75, 0x62,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1c, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73,
+ 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x1a, 0x66, 0x0a, 0x1c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73,
+ 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69,
+ 0x6e, 0x67, 0x12, 0x27, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12,
+ 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45,
+ 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45,
+ 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62,
0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12,
- 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
- 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
- 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65,
- 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65,
- 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x61, 0x69,
- 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x72, 0x65, 0x74,
- 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
- 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47,
- 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69,
- 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x65,
- 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65,
- 0x72, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69,
- 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f,
- 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
- 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65,
- 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x64, 0x65,
- 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x45,
- 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0e,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
- 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65,
- 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, 0x65,
- 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
- 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65,
- 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79,
- 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x12, 0x67, 0x0a, 0x20,
- 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65,
- 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x13,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
- 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54,
- 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x12, 0x0a,
- 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10,
- 0x02, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x9f, 0x01, 0x0a, 0x0b,
- 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x47, 0x0a, 0x0f, 0x6d,
- 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63,
- 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f,
- 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d,
- 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x7c, 0x0a,
- 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72,
- 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x54, 0x6f, 0x70,
- 0x69, 0x63, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65,
- 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x69, 0x76,
- 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, 0x44, 0x0a, 0x10, 0x45,
- 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
- 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x03, 0x74, 0x74,
- 0x6c, 0x22, 0x93, 0x05, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x12, 0x28, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x70, 0x75,
- 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x0a, 0x61, 0x74,
- 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
- 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4c, 0x0a,
- 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
- 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00,
- 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x0e, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
+ 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
+ 0x22, 0x9f, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b,
+ 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d,
+ 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f,
+ 0x66, 0x66, 0x22, 0x7c, 0x0a, 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c,
+ 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74,
+ 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64,
+ 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78,
+ 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73,
+ 0x22, 0x44, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x93, 0x05, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e,
+ 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x0c, 0x70, 0x75, 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12,
+ 0x51, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72,
- 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x6f, 0x5f, 0x77, 0x72, 0x61, 0x70,
- 0x70, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73,
- 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65,
- 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x09, 0x6e, 0x6f, 0x57, 0x72, 0x61, 0x70,
- 0x70, 0x65, 0x72, 0x1a, 0x65, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x75, 0x64,
- 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
- 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x0f, 0x0a, 0x0d, 0x50, 0x75,
- 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x4e,
- 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74,
- 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x09, 0x0a, 0x07,
- 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x22, 0xf3, 0x03, 0x0a, 0x0e, 0x42, 0x69, 0x67, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x12, 0x33, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
- 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
- 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x58, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70,
+ 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x75, 0x62,
+ 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x6f,
+ 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x6f, 0x57,
+ 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x09, 0x6e,
+ 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x65, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f,
+ 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a,
+ 0x0f, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x1a, 0x37, 0x0a, 0x09, 0x4e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x2a, 0x0a,
+ 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68,
+ 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x42, 0x09, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x22, 0xf3, 0x03, 0x0a,
+ 0x0e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x19, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73,
+ 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f,
+ 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, 0x6e,
+ 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b,
+ 0x6e, 0x6f, 0x77, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a,
+ 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73,
+ 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x15,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
+ 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12,
+ 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45,
+ 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54,
+ 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45,
+ 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x12, 0x23, 0x0a,
+ 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e,
+ 0x10, 0x05, 0x22, 0xa0, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, 0x66,
+ 0x69, 0x78, 0x12, 0x3d, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x64,
+ 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x12, 0x57, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x65, 0x78,
+ 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a,
+ 0x74, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0b, 0x61, 0x76,
+ 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79,
+ 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08,
+ 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
+ 0x12, 0x45, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c,
- 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54,
- 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a,
- 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49,
- 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e,
- 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49,
- 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54,
- 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x22, 0xa0, 0x07,
- 0x0a, 0x12, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66,
- 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x66,
- 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x3d, 0x0a,
- 0x18, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x44, 0x61,
- 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x57, 0x0a, 0x0b,
- 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01,
- 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41,
- 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x42, 0x79,
- 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b,
- 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63,
- 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x1a, 0x0c, 0x0a, 0x0a, 0x54,
- 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x67, 0x0a, 0x0a, 0x41, 0x76, 0x72,
- 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63,
- 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11,
- 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12,
- 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45,
- 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f,
- 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e,
- 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53,
- 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43,
- 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x05, 0x42,
- 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
- 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x61, 0x63, 0x6b, 0x49, 0x64,
- 0x12, 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74,
- 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74,
- 0x22, 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x19, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
- 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b,
- 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65,
- 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6b, 0x0a, 0x19, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x64,
- 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73,
- 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x70, 0x75,
- 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x0b, 0x50, 0x75, 0x6c,
- 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x75,
- 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x18, 0x01, 0x52, 0x11, 0x72, 0x65, 0x74,
- 0x75, 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x12, 0x26,
- 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x4d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76,
- 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69,
- 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xbf, 0x01, 0x0a, 0x18,
- 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f,
- 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
- 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65,
- 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65,
- 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x82, 0x01,
- 0x0a, 0x12, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49,
- 0x64, 0x73, 0x22, 0xdb, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
- 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61,
- 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x6d, 0x6f, 0x64,
- 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63,
- 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x15, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53,
- 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3a, 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79,
- 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x6f,
- 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x6b, 0x49,
- 0x64, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x63, 0x6b,
- 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
- 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x18, 0x73, 0x74,
- 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53,
- 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08,
- 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f,
- 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x16, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x6f,
- 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78,
- 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73,
- 0x22, 0xa4, 0x08, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75,
- 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65,
- 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
- 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72,
- 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12,
- 0x7f, 0x0a, 0x18, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x63,
- 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75,
- 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f,
- 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c,
- 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x93, 0x01, 0x0a, 0x20, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x61, 0x63, 0x6b, 0x5f,
- 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65,
- 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41,
- 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72,
- 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
- 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72,
- 0x74, 0x69, 0x65, 0x73, 0x1a, 0xd3, 0x01, 0x0a, 0x17, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c,
- 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b,
- 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e,
- 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x11, 0x75,
- 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x75, 0x6e, 0x6f,
- 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18,
- 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64,
- 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61,
- 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0xa8, 0x01, 0x0a, 0x1d, 0x4d,
- 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07,
- 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x69, 0x6e,
- 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x74, 0x65, 0x6d, 0x70, 0x6f,
- 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f,
- 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15,
- 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41,
- 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73,
- 0x12, 0x46, 0x0a, 0x1d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65,
- 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a, 0x65, 0x78,
- 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72,
- 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61,
- 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67,
- 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xb0, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x67,
+ 0x0a, 0x0a, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0e,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69,
+ 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49,
+ 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e,
+ 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12,
+ 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54,
+ 0x43, 0x48, 0x10, 0x05, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76,
+ 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x61, 0x63, 0x6b,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05,
+ 0x61, 0x63, 0x6b, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72,
+ 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74,
+ 0x74, 0x65, 0x6d, 0x70, 0x74, 0x22, 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0xa6, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
+ 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73,
+ 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
+ 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x19, 0x4c,
+ 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62,
+ 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01,
+ 0x0a, 0x17, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a,
- 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73,
+ 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01,
+ 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62,
0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a,
- 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a,
- 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x15, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
- 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
- 0x61, 0x73, 0x6b, 0x22, 0xee, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x1d,
- 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74,
- 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x4c, 0xea, 0x41, 0x49, 0x0a, 0x1e, 0x70, 0x75, 0x62,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a,
+ 0x12, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
+ 0x65, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x18, 0x01,
+ 0x52, 0x11, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
+ 0x65, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b,
+ 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0c, 0x50,
+ 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72,
+ 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76,
+ 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10,
+ 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
+ 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65,
+ 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62,
0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f,
- 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73,
- 0x68, 0x6f, 0x74, 0x7d, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73,
- 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e,
- 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xab,
- 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
- 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
- 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08,
- 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x83, 0x01, 0x0a,
- 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61,
- 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61,
- 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22,
- 0xe4, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x35, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00,
- 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x20,
+ 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a,
+ 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61,
+ 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12,
+ 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e,
+ 0x64, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64,
+ 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x22, 0xdb, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3b,
+ 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e,
+ 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64,
+ 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3a, 0x0a, 0x17, 0x6d,
+ 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61,
+ 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x14, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e,
+ 0x65, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73,
+ 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x18, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64,
+ 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a,
+ 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e,
+ 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15,
+ 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f,
+ 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xa4, 0x08, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
+ 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x73, 0x12, 0x7f, 0x0a, 0x18, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65,
+ 0x64, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x61, 0x63,
+ 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x93, 0x01, 0x0a, 0x20, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79,
+ 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c,
+ 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79,
+ 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1d, 0x6d, 0x6f,
+ 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70,
+ 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x16, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0xd3, 0x01, 0x0a, 0x17, 0x41, 0x63,
+ 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b,
+ 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61,
+ 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73,
+ 0x12, 0x2f, 0x0a, 0x11, 0x75, 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x63,
+ 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x0f, 0x75, 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64,
+ 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66,
+ 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72,
+ 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a,
+ 0xa8, 0x01, 0x0a, 0x1d, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61,
+ 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12,
+ 0x2b, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69,
+ 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69,
+ 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18,
+ 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64,
+ 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61,
+ 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x16, 0x53,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x1d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79,
+ 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x65,
+ 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x1a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65,
+ 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3d, 0x0a,
+ 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e,
+ 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64,
+ 0x65, 0x72, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xb0, 0x02, 0x0a,
+ 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24,
+ 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70,
+ 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
+ 0x96, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
+ 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e,
+ 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xee, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39,
+ 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0,
+ 0x41, 0x01, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x4c, 0xea, 0x41, 0x49,
0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x48, 0x00, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x08, 0x0a, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb8, 0x0b, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69,
- 0x73, 0x68, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x6f,
- 0x70, 0x69, 0x63, 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
- 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x1a, 0x17, 0x2e, 0x67,
+ 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b,
+ 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74,
+ 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70,
+ 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a,
+ 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
+ 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x73,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65,
+ 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70,
+ 0x73, 0x68, 0x6f, 0x74, 0x22, 0xe4, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x73,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0,
+ 0x41, 0x01, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
+ 0x74, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53,
+ 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb8, 0x0b, 0x0a, 0x09,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69,
+ 0x63, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a,
+ 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x43, 0xda, 0x41, 0x11,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x12, 0x93, 0x01, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x43, 0xda, 0x41, 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70,
+ 0x69, 0x63, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x2f,
+ 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f,
+ 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
+ 0x8a, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
+ 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x07, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0xba, 0x01, 0x0a,
+ 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x05, 0x74,
+ 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x12, 0x4c, 0x69,
+ 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73,
+ 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
+ 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x05,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54,
+ 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x22, 0x2f, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x21, 0x2a, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44,
+ 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61,
+ 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x22, 0x34,
+ 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65,
+ 0x74, 0x61, 0x63, 0x68, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41,
+ 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
+ 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
+ 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x32, 0xd2, 0x15, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x63,
+ 0x72, 0x69, 0x62, 0x65, 0x72, 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1e, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
- 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f,
- 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0xda, 0x41,
+ 0x2b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, 0x75, 0x73, 0x68,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa1, 0x01, 0x0a,
+ 0x0f, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x12, 0xbb, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
- 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x43, 0xda, 0x41, 0x11, 0x74, 0x6f, 0x70, 0x69, 0x63,
- 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x07,
- 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69,
- 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62,
- 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41,
- 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74,
- 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73,
- 0x68, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
- 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x2f, 0xda, 0x41, 0x05, 0x74, 0x6f,
- 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8a, 0x01, 0x0a, 0x0a, 0x4c,
- 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
+ 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x58, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa6,
+ 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0xda,
+ 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12,
+ 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
- 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74,
- 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
- 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
- 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70,
- 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f,
- 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63,
- 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2f,
- 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x2a, 0x1f,
- 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68,
- 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62,
- 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x1a,
- 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70,
- 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73,
- 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x32, 0xd2, 0x15, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72,
- 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0xda, 0x41, 0x2b, 0x6e, 0x61, 0x6d, 0x65,
- 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f,
- 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a,
- 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbb, 0x01, 0x0a, 0x12,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0x58, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
+ 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa6, 0x01, 0x0a, 0x11, 0x4c, 0x69,
- 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f,
+ 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12,
0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x76, 0x31, 0x2f,
- 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x44,
- 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41,
- 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f,
- 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x76,
- 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c,
- 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64,
- 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65,
- 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x41, 0x63, 0x6b, 0x6e, 0x6f,
- 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77,
- 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5b, 0xda, 0x41, 0x14, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67,
- 0x65, 0x12, 0xd0, 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75,
- 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c,
- 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0xda, 0x41, 0x2c, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x72, 0x65, 0x74, 0x75,
- 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x2c, 0x6d,
- 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0xda, 0x41, 0x19, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a,
- 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a,
- 0x70, 0x75, 0x6c, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
- 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
- 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xbb, 0x01, 0x0a,
- 0x10, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
- 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x01, 0x2a, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f,
+ 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64,
+ 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
+ 0x70, 0x74, 0x79, 0x22, 0x76, 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, 0x63, 0x6b,
+ 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
+ 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f,
0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79,
- 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x0b, 0x47,
- 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x38, 0xda, 0x41,
- 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12,
- 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x96, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0xda, 0x41, 0x07, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12,
- 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65,
- 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
- 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61,
- 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3, 0x01, 0x0a, 0x0e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65,
+ 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x0b,
+ 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41,
+ 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5b, 0xda, 0x41, 0x14, 0x73, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69,
+ 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f,
+ 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0xd0, 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88,
+ 0x01, 0xda, 0x41, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2c, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
+ 0x65, 0x6c, 0x79, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73,
+ 0xda, 0x41, 0x19, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c,
+ 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, 0x6c, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
+ 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50,
+ 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30,
+ 0x01, 0x12, 0xbb, 0x01, 0x0a, 0x10, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79,
+ 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0xda, 0x41, 0x18, 0x73, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x01, 0x2a, 0x22,
+ 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d,
+ 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x89, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
+ 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
- 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2c, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f,
- 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73,
- 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70,
- 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
- 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61,
- 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x84, 0x01,
- 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a,
- 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
- 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a,
- 0x73, 0x65, 0x65, 0x6b, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41,
- 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
- 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x42,
- 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31,
- 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
- 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x2e, 0x56, 0x31, 0xca, 0x02,
- 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x50, 0x75,
- 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x3a,
- 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x96, 0x01, 0x0a, 0x0d,
+ 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34,
+ 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24,
+ 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41,
+ 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3,
+ 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
+ 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e,
+ 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73,
+ 0x68, 0x6f, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
+ 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61,
+ 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x12, 0x84, 0x01, 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53,
+ 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
+ 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x65, 0x6b, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75,
+ 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
+ 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75,
+ 0x62, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f,
+ 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0x3b, 0x70,
+ 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62,
+ 0x2e, 0x56, 0x31, 0xca, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x5c, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75,
+ 0x62, 0x53, 0x75, 0x62, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -5764,192 +6762,216 @@ func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte {
return file_google_pubsub_v1_pubsub_proto_rawDescData
}
-var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
-var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 62)
+var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 7)
+var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 72)
var file_google_pubsub_v1_pubsub_proto_goTypes = []any{
- (IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State
- (Topic_State)(0), // 1: google.pubsub.v1.Topic.State
- (Subscription_State)(0), // 2: google.pubsub.v1.Subscription.State
- (BigQueryConfig_State)(0), // 3: google.pubsub.v1.BigQueryConfig.State
- (CloudStorageConfig_State)(0), // 4: google.pubsub.v1.CloudStorageConfig.State
- (*MessageStoragePolicy)(nil), // 5: google.pubsub.v1.MessageStoragePolicy
- (*SchemaSettings)(nil), // 6: google.pubsub.v1.SchemaSettings
- (*IngestionDataSourceSettings)(nil), // 7: google.pubsub.v1.IngestionDataSourceSettings
- (*Topic)(nil), // 8: google.pubsub.v1.Topic
- (*PubsubMessage)(nil), // 9: google.pubsub.v1.PubsubMessage
- (*GetTopicRequest)(nil), // 10: google.pubsub.v1.GetTopicRequest
- (*UpdateTopicRequest)(nil), // 11: google.pubsub.v1.UpdateTopicRequest
- (*PublishRequest)(nil), // 12: google.pubsub.v1.PublishRequest
- (*PublishResponse)(nil), // 13: google.pubsub.v1.PublishResponse
- (*ListTopicsRequest)(nil), // 14: google.pubsub.v1.ListTopicsRequest
- (*ListTopicsResponse)(nil), // 15: google.pubsub.v1.ListTopicsResponse
- (*ListTopicSubscriptionsRequest)(nil), // 16: google.pubsub.v1.ListTopicSubscriptionsRequest
- (*ListTopicSubscriptionsResponse)(nil), // 17: google.pubsub.v1.ListTopicSubscriptionsResponse
- (*ListTopicSnapshotsRequest)(nil), // 18: google.pubsub.v1.ListTopicSnapshotsRequest
- (*ListTopicSnapshotsResponse)(nil), // 19: google.pubsub.v1.ListTopicSnapshotsResponse
- (*DeleteTopicRequest)(nil), // 20: google.pubsub.v1.DeleteTopicRequest
- (*DetachSubscriptionRequest)(nil), // 21: google.pubsub.v1.DetachSubscriptionRequest
- (*DetachSubscriptionResponse)(nil), // 22: google.pubsub.v1.DetachSubscriptionResponse
- (*Subscription)(nil), // 23: google.pubsub.v1.Subscription
- (*RetryPolicy)(nil), // 24: google.pubsub.v1.RetryPolicy
- (*DeadLetterPolicy)(nil), // 25: google.pubsub.v1.DeadLetterPolicy
- (*ExpirationPolicy)(nil), // 26: google.pubsub.v1.ExpirationPolicy
- (*PushConfig)(nil), // 27: google.pubsub.v1.PushConfig
- (*BigQueryConfig)(nil), // 28: google.pubsub.v1.BigQueryConfig
- (*CloudStorageConfig)(nil), // 29: google.pubsub.v1.CloudStorageConfig
- (*ReceivedMessage)(nil), // 30: google.pubsub.v1.ReceivedMessage
- (*GetSubscriptionRequest)(nil), // 31: google.pubsub.v1.GetSubscriptionRequest
- (*UpdateSubscriptionRequest)(nil), // 32: google.pubsub.v1.UpdateSubscriptionRequest
- (*ListSubscriptionsRequest)(nil), // 33: google.pubsub.v1.ListSubscriptionsRequest
- (*ListSubscriptionsResponse)(nil), // 34: google.pubsub.v1.ListSubscriptionsResponse
- (*DeleteSubscriptionRequest)(nil), // 35: google.pubsub.v1.DeleteSubscriptionRequest
- (*ModifyPushConfigRequest)(nil), // 36: google.pubsub.v1.ModifyPushConfigRequest
- (*PullRequest)(nil), // 37: google.pubsub.v1.PullRequest
- (*PullResponse)(nil), // 38: google.pubsub.v1.PullResponse
- (*ModifyAckDeadlineRequest)(nil), // 39: google.pubsub.v1.ModifyAckDeadlineRequest
- (*AcknowledgeRequest)(nil), // 40: google.pubsub.v1.AcknowledgeRequest
- (*StreamingPullRequest)(nil), // 41: google.pubsub.v1.StreamingPullRequest
- (*StreamingPullResponse)(nil), // 42: google.pubsub.v1.StreamingPullResponse
- (*CreateSnapshotRequest)(nil), // 43: google.pubsub.v1.CreateSnapshotRequest
- (*UpdateSnapshotRequest)(nil), // 44: google.pubsub.v1.UpdateSnapshotRequest
- (*Snapshot)(nil), // 45: google.pubsub.v1.Snapshot
- (*GetSnapshotRequest)(nil), // 46: google.pubsub.v1.GetSnapshotRequest
- (*ListSnapshotsRequest)(nil), // 47: google.pubsub.v1.ListSnapshotsRequest
- (*ListSnapshotsResponse)(nil), // 48: google.pubsub.v1.ListSnapshotsResponse
- (*DeleteSnapshotRequest)(nil), // 49: google.pubsub.v1.DeleteSnapshotRequest
- (*SeekRequest)(nil), // 50: google.pubsub.v1.SeekRequest
- (*SeekResponse)(nil), // 51: google.pubsub.v1.SeekResponse
- (*IngestionDataSourceSettings_AwsKinesis)(nil), // 52: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis
- nil, // 53: google.pubsub.v1.Topic.LabelsEntry
- nil, // 54: google.pubsub.v1.PubsubMessage.AttributesEntry
- nil, // 55: google.pubsub.v1.Subscription.LabelsEntry
- (*PushConfig_OidcToken)(nil), // 56: google.pubsub.v1.PushConfig.OidcToken
- (*PushConfig_PubsubWrapper)(nil), // 57: google.pubsub.v1.PushConfig.PubsubWrapper
- (*PushConfig_NoWrapper)(nil), // 58: google.pubsub.v1.PushConfig.NoWrapper
- nil, // 59: google.pubsub.v1.PushConfig.AttributesEntry
- (*CloudStorageConfig_TextConfig)(nil), // 60: google.pubsub.v1.CloudStorageConfig.TextConfig
- (*CloudStorageConfig_AvroConfig)(nil), // 61: google.pubsub.v1.CloudStorageConfig.AvroConfig
- (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 62: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation
- (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 63: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation
- (*StreamingPullResponse_SubscriptionProperties)(nil), // 64: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties
- nil, // 65: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry
- nil, // 66: google.pubsub.v1.Snapshot.LabelsEntry
- (Encoding)(0), // 67: google.pubsub.v1.Encoding
- (*durationpb.Duration)(nil), // 68: google.protobuf.Duration
- (*timestamppb.Timestamp)(nil), // 69: google.protobuf.Timestamp
- (*fieldmaskpb.FieldMask)(nil), // 70: google.protobuf.FieldMask
- (*emptypb.Empty)(nil), // 71: google.protobuf.Empty
+ (IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State
+ (IngestionDataSourceSettings_CloudStorage_State)(0), // 1: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State
+ (PlatformLogsSettings_Severity)(0), // 2: google.pubsub.v1.PlatformLogsSettings.Severity
+ (Topic_State)(0), // 3: google.pubsub.v1.Topic.State
+ (Subscription_State)(0), // 4: google.pubsub.v1.Subscription.State
+ (BigQueryConfig_State)(0), // 5: google.pubsub.v1.BigQueryConfig.State
+ (CloudStorageConfig_State)(0), // 6: google.pubsub.v1.CloudStorageConfig.State
+ (*MessageStoragePolicy)(nil), // 7: google.pubsub.v1.MessageStoragePolicy
+ (*SchemaSettings)(nil), // 8: google.pubsub.v1.SchemaSettings
+ (*IngestionDataSourceSettings)(nil), // 9: google.pubsub.v1.IngestionDataSourceSettings
+ (*PlatformLogsSettings)(nil), // 10: google.pubsub.v1.PlatformLogsSettings
+ (*IngestionFailureEvent)(nil), // 11: google.pubsub.v1.IngestionFailureEvent
+ (*Topic)(nil), // 12: google.pubsub.v1.Topic
+ (*PubsubMessage)(nil), // 13: google.pubsub.v1.PubsubMessage
+ (*GetTopicRequest)(nil), // 14: google.pubsub.v1.GetTopicRequest
+ (*UpdateTopicRequest)(nil), // 15: google.pubsub.v1.UpdateTopicRequest
+ (*PublishRequest)(nil), // 16: google.pubsub.v1.PublishRequest
+ (*PublishResponse)(nil), // 17: google.pubsub.v1.PublishResponse
+ (*ListTopicsRequest)(nil), // 18: google.pubsub.v1.ListTopicsRequest
+ (*ListTopicsResponse)(nil), // 19: google.pubsub.v1.ListTopicsResponse
+ (*ListTopicSubscriptionsRequest)(nil), // 20: google.pubsub.v1.ListTopicSubscriptionsRequest
+ (*ListTopicSubscriptionsResponse)(nil), // 21: google.pubsub.v1.ListTopicSubscriptionsResponse
+ (*ListTopicSnapshotsRequest)(nil), // 22: google.pubsub.v1.ListTopicSnapshotsRequest
+ (*ListTopicSnapshotsResponse)(nil), // 23: google.pubsub.v1.ListTopicSnapshotsResponse
+ (*DeleteTopicRequest)(nil), // 24: google.pubsub.v1.DeleteTopicRequest
+ (*DetachSubscriptionRequest)(nil), // 25: google.pubsub.v1.DetachSubscriptionRequest
+ (*DetachSubscriptionResponse)(nil), // 26: google.pubsub.v1.DetachSubscriptionResponse
+ (*Subscription)(nil), // 27: google.pubsub.v1.Subscription
+ (*RetryPolicy)(nil), // 28: google.pubsub.v1.RetryPolicy
+ (*DeadLetterPolicy)(nil), // 29: google.pubsub.v1.DeadLetterPolicy
+ (*ExpirationPolicy)(nil), // 30: google.pubsub.v1.ExpirationPolicy
+ (*PushConfig)(nil), // 31: google.pubsub.v1.PushConfig
+ (*BigQueryConfig)(nil), // 32: google.pubsub.v1.BigQueryConfig
+ (*CloudStorageConfig)(nil), // 33: google.pubsub.v1.CloudStorageConfig
+ (*ReceivedMessage)(nil), // 34: google.pubsub.v1.ReceivedMessage
+ (*GetSubscriptionRequest)(nil), // 35: google.pubsub.v1.GetSubscriptionRequest
+ (*UpdateSubscriptionRequest)(nil), // 36: google.pubsub.v1.UpdateSubscriptionRequest
+ (*ListSubscriptionsRequest)(nil), // 37: google.pubsub.v1.ListSubscriptionsRequest
+ (*ListSubscriptionsResponse)(nil), // 38: google.pubsub.v1.ListSubscriptionsResponse
+ (*DeleteSubscriptionRequest)(nil), // 39: google.pubsub.v1.DeleteSubscriptionRequest
+ (*ModifyPushConfigRequest)(nil), // 40: google.pubsub.v1.ModifyPushConfigRequest
+ (*PullRequest)(nil), // 41: google.pubsub.v1.PullRequest
+ (*PullResponse)(nil), // 42: google.pubsub.v1.PullResponse
+ (*ModifyAckDeadlineRequest)(nil), // 43: google.pubsub.v1.ModifyAckDeadlineRequest
+ (*AcknowledgeRequest)(nil), // 44: google.pubsub.v1.AcknowledgeRequest
+ (*StreamingPullRequest)(nil), // 45: google.pubsub.v1.StreamingPullRequest
+ (*StreamingPullResponse)(nil), // 46: google.pubsub.v1.StreamingPullResponse
+ (*CreateSnapshotRequest)(nil), // 47: google.pubsub.v1.CreateSnapshotRequest
+ (*UpdateSnapshotRequest)(nil), // 48: google.pubsub.v1.UpdateSnapshotRequest
+ (*Snapshot)(nil), // 49: google.pubsub.v1.Snapshot
+ (*GetSnapshotRequest)(nil), // 50: google.pubsub.v1.GetSnapshotRequest
+ (*ListSnapshotsRequest)(nil), // 51: google.pubsub.v1.ListSnapshotsRequest
+ (*ListSnapshotsResponse)(nil), // 52: google.pubsub.v1.ListSnapshotsResponse
+ (*DeleteSnapshotRequest)(nil), // 53: google.pubsub.v1.DeleteSnapshotRequest
+ (*SeekRequest)(nil), // 54: google.pubsub.v1.SeekRequest
+ (*SeekResponse)(nil), // 55: google.pubsub.v1.SeekResponse
+ (*IngestionDataSourceSettings_AwsKinesis)(nil), // 56: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis
+ (*IngestionDataSourceSettings_CloudStorage)(nil), // 57: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage
+ (*IngestionDataSourceSettings_CloudStorage_TextFormat)(nil), // 58: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat
+ (*IngestionDataSourceSettings_CloudStorage_AvroFormat)(nil), // 59: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat
+ (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat)(nil), // 60: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat
+ (*IngestionFailureEvent_ApiViolationReason)(nil), // 61: google.pubsub.v1.IngestionFailureEvent.ApiViolationReason
+ (*IngestionFailureEvent_AvroFailureReason)(nil), // 62: google.pubsub.v1.IngestionFailureEvent.AvroFailureReason
+ (*IngestionFailureEvent_CloudStorageFailure)(nil), // 63: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure
+ nil, // 64: google.pubsub.v1.Topic.LabelsEntry
+ nil, // 65: google.pubsub.v1.PubsubMessage.AttributesEntry
+ (*Subscription_AnalyticsHubSubscriptionInfo)(nil), // 66: google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo
+ nil, // 67: google.pubsub.v1.Subscription.LabelsEntry
+ (*PushConfig_OidcToken)(nil), // 68: google.pubsub.v1.PushConfig.OidcToken
+ (*PushConfig_PubsubWrapper)(nil), // 69: google.pubsub.v1.PushConfig.PubsubWrapper
+ (*PushConfig_NoWrapper)(nil), // 70: google.pubsub.v1.PushConfig.NoWrapper
+ nil, // 71: google.pubsub.v1.PushConfig.AttributesEntry
+ (*CloudStorageConfig_TextConfig)(nil), // 72: google.pubsub.v1.CloudStorageConfig.TextConfig
+ (*CloudStorageConfig_AvroConfig)(nil), // 73: google.pubsub.v1.CloudStorageConfig.AvroConfig
+ (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 74: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation
+ (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 75: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation
+ (*StreamingPullResponse_SubscriptionProperties)(nil), // 76: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties
+ nil, // 77: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry
+ nil, // 78: google.pubsub.v1.Snapshot.LabelsEntry
+ (Encoding)(0), // 79: google.pubsub.v1.Encoding
+ (*durationpb.Duration)(nil), // 80: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp
+ (*fieldmaskpb.FieldMask)(nil), // 82: google.protobuf.FieldMask
+ (*emptypb.Empty)(nil), // 83: google.protobuf.Empty
}
var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{
- 67, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding
- 52, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis
- 53, // 2: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry
- 5, // 3: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy
- 6, // 4: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings
- 68, // 5: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration
- 1, // 6: google.pubsub.v1.Topic.state:type_name -> google.pubsub.v1.Topic.State
- 7, // 7: google.pubsub.v1.Topic.ingestion_data_source_settings:type_name -> google.pubsub.v1.IngestionDataSourceSettings
- 54, // 8: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry
- 69, // 9: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp
- 8, // 10: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic
- 70, // 11: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask
- 9, // 12: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage
- 8, // 13: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic
- 27, // 14: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig
- 28, // 15: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig
- 29, // 16: google.pubsub.v1.Subscription.cloud_storage_config:type_name -> google.pubsub.v1.CloudStorageConfig
- 68, // 17: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration
- 55, // 18: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry
- 26, // 19: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy
- 25, // 20: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy
- 24, // 21: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy
- 68, // 22: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration
- 2, // 23: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State
- 68, // 24: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration
- 68, // 25: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration
- 68, // 26: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration
- 59, // 27: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry
- 56, // 28: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken
- 57, // 29: google.pubsub.v1.PushConfig.pubsub_wrapper:type_name -> google.pubsub.v1.PushConfig.PubsubWrapper
- 58, // 30: google.pubsub.v1.PushConfig.no_wrapper:type_name -> google.pubsub.v1.PushConfig.NoWrapper
- 3, // 31: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State
- 60, // 32: google.pubsub.v1.CloudStorageConfig.text_config:type_name -> google.pubsub.v1.CloudStorageConfig.TextConfig
- 61, // 33: google.pubsub.v1.CloudStorageConfig.avro_config:type_name -> google.pubsub.v1.CloudStorageConfig.AvroConfig
- 68, // 34: google.pubsub.v1.CloudStorageConfig.max_duration:type_name -> google.protobuf.Duration
- 4, // 35: google.pubsub.v1.CloudStorageConfig.state:type_name -> google.pubsub.v1.CloudStorageConfig.State
- 9, // 36: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage
- 23, // 37: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription
- 70, // 38: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask
- 23, // 39: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription
- 27, // 40: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig
- 30, // 41: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage
- 30, // 42: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage
- 62, // 43: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation
- 63, // 44: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation
- 64, // 45: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties
- 65, // 46: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry
- 45, // 47: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot
- 70, // 48: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask
- 69, // 49: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp
- 66, // 50: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry
- 45, // 51: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot
- 69, // 52: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp
- 0, // 53: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State
- 8, // 54: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic
- 11, // 55: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest
- 12, // 56: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest
- 10, // 57: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest
- 14, // 58: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest
- 16, // 59: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest
- 18, // 60: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest
- 20, // 61: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest
- 21, // 62: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest
- 23, // 63: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription
- 31, // 64: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest
- 32, // 65: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest
- 33, // 66: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest
- 35, // 67: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest
- 39, // 68: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest
- 40, // 69: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest
- 37, // 70: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest
- 41, // 71: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest
- 36, // 72: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest
- 46, // 73: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest
- 47, // 74: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest
- 43, // 75: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest
- 44, // 76: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest
- 49, // 77: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest
- 50, // 78: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest
- 8, // 79: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic
- 8, // 80: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic
- 13, // 81: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse
- 8, // 82: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic
- 15, // 83: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse
- 17, // 84: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse
- 19, // 85: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse
- 71, // 86: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty
- 22, // 87: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse
- 23, // 88: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription
- 23, // 89: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription
- 23, // 90: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription
- 34, // 91: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse
- 71, // 92: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty
- 71, // 93: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty
- 71, // 94: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty
- 38, // 95: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse
- 42, // 96: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse
- 71, // 97: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty
- 45, // 98: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot
- 48, // 99: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse
- 45, // 100: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot
- 45, // 101: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot
- 71, // 102: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty
- 51, // 103: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse
- 79, // [79:104] is the sub-list for method output_type
- 54, // [54:79] is the sub-list for method input_type
- 54, // [54:54] is the sub-list for extension type_name
- 54, // [54:54] is the sub-list for extension extendee
- 0, // [0:54] is the sub-list for field type_name
+ 79, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding
+ 56, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis
+ 57, // 2: google.pubsub.v1.IngestionDataSourceSettings.cloud_storage:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage
+ 10, // 3: google.pubsub.v1.IngestionDataSourceSettings.platform_logs_settings:type_name -> google.pubsub.v1.PlatformLogsSettings
+ 2, // 4: google.pubsub.v1.PlatformLogsSettings.severity:type_name -> google.pubsub.v1.PlatformLogsSettings.Severity
+ 63, // 5: google.pubsub.v1.IngestionFailureEvent.cloud_storage_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure
+ 64, // 6: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry
+ 7, // 7: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy
+ 8, // 8: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings
+ 80, // 9: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration
+ 3, // 10: google.pubsub.v1.Topic.state:type_name -> google.pubsub.v1.Topic.State
+ 9, // 11: google.pubsub.v1.Topic.ingestion_data_source_settings:type_name -> google.pubsub.v1.IngestionDataSourceSettings
+ 65, // 12: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry
+ 81, // 13: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp
+ 12, // 14: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic
+ 82, // 15: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 13, // 16: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage
+ 12, // 17: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic
+ 31, // 18: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig
+ 32, // 19: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig
+ 33, // 20: google.pubsub.v1.Subscription.cloud_storage_config:type_name -> google.pubsub.v1.CloudStorageConfig
+ 80, // 21: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration
+ 67, // 22: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry
+ 30, // 23: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy
+ 29, // 24: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy
+ 28, // 25: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy
+ 80, // 26: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration
+ 4, // 27: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State
+ 66, // 28: google.pubsub.v1.Subscription.analytics_hub_subscription_info:type_name -> google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo
+ 80, // 29: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration
+ 80, // 30: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration
+ 80, // 31: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration
+ 71, // 32: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry
+ 68, // 33: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken
+ 69, // 34: google.pubsub.v1.PushConfig.pubsub_wrapper:type_name -> google.pubsub.v1.PushConfig.PubsubWrapper
+ 70, // 35: google.pubsub.v1.PushConfig.no_wrapper:type_name -> google.pubsub.v1.PushConfig.NoWrapper
+ 5, // 36: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State
+ 72, // 37: google.pubsub.v1.CloudStorageConfig.text_config:type_name -> google.pubsub.v1.CloudStorageConfig.TextConfig
+ 73, // 38: google.pubsub.v1.CloudStorageConfig.avro_config:type_name -> google.pubsub.v1.CloudStorageConfig.AvroConfig
+ 80, // 39: google.pubsub.v1.CloudStorageConfig.max_duration:type_name -> google.protobuf.Duration
+ 6, // 40: google.pubsub.v1.CloudStorageConfig.state:type_name -> google.pubsub.v1.CloudStorageConfig.State
+ 13, // 41: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage
+ 27, // 42: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription
+ 82, // 43: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 27, // 44: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription
+ 31, // 45: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig
+ 34, // 46: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage
+ 34, // 47: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage
+ 74, // 48: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation
+ 75, // 49: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation
+ 76, // 50: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties
+ 77, // 51: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry
+ 49, // 52: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot
+ 82, // 53: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 81, // 54: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp
+ 78, // 55: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry
+ 49, // 56: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot
+ 81, // 57: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp
+ 0, // 58: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State
+ 1, // 59: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State
+ 58, // 60: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.text_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat
+ 59, // 61: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat
+ 60, // 62: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.pubsub_avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat
+ 81, // 63: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.minimum_object_create_time:type_name -> google.protobuf.Timestamp
+ 62, // 64: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.avro_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.AvroFailureReason
+ 61, // 65: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason
+ 12, // 66: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic
+ 15, // 67: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest
+ 16, // 68: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest
+ 14, // 69: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest
+ 18, // 70: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest
+ 20, // 71: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest
+ 22, // 72: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest
+ 24, // 73: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest
+ 25, // 74: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest
+ 27, // 75: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription
+ 35, // 76: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest
+ 36, // 77: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest
+ 37, // 78: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest
+ 39, // 79: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest
+ 43, // 80: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest
+ 44, // 81: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest
+ 41, // 82: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest
+ 45, // 83: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest
+ 40, // 84: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest
+ 50, // 85: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest
+ 51, // 86: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest
+ 47, // 87: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest
+ 48, // 88: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest
+ 53, // 89: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest
+ 54, // 90: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest
+ 12, // 91: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic
+ 12, // 92: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic
+ 17, // 93: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse
+ 12, // 94: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic
+ 19, // 95: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse
+ 21, // 96: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse
+ 23, // 97: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse
+ 83, // 98: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty
+ 26, // 99: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse
+ 27, // 100: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription
+ 27, // 101: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription
+ 27, // 102: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription
+ 38, // 103: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse
+ 83, // 104: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty
+ 83, // 105: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty
+ 83, // 106: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty
+ 42, // 107: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse
+ 46, // 108: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse
+ 83, // 109: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty
+ 49, // 110: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot
+ 52, // 111: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse
+ 49, // 112: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot
+ 49, // 113: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot
+ 83, // 114: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty
+ 55, // 115: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse
+ 91, // [91:116] is the sub-list for method output_type
+ 66, // [66:91] is the sub-list for method input_type
+ 66, // [66:66] is the sub-list for extension type_name
+ 66, // [66:66] is the sub-list for extension extendee
+ 0, // [0:66] is the sub-list for field type_name
}
func init() { file_google_pubsub_v1_pubsub_proto_init() }
@@ -5996,7 +7018,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Topic); i {
+ switch v := v.(*PlatformLogsSettings); i {
case 0:
return &v.state
case 1:
@@ -6008,7 +7030,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*PubsubMessage); i {
+ switch v := v.(*IngestionFailureEvent); i {
case 0:
return &v.state
case 1:
@@ -6020,7 +7042,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*GetTopicRequest); i {
+ switch v := v.(*Topic); i {
case 0:
return &v.state
case 1:
@@ -6032,7 +7054,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateTopicRequest); i {
+ switch v := v.(*PubsubMessage); i {
case 0:
return &v.state
case 1:
@@ -6044,7 +7066,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*PublishRequest); i {
+ switch v := v.(*GetTopicRequest); i {
case 0:
return &v.state
case 1:
@@ -6056,7 +7078,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*PublishResponse); i {
+ switch v := v.(*UpdateTopicRequest); i {
case 0:
return &v.state
case 1:
@@ -6068,7 +7090,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicsRequest); i {
+ switch v := v.(*PublishRequest); i {
case 0:
return &v.state
case 1:
@@ -6080,7 +7102,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicsResponse); i {
+ switch v := v.(*PublishResponse); i {
case 0:
return &v.state
case 1:
@@ -6092,7 +7114,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicSubscriptionsRequest); i {
+ switch v := v.(*ListTopicsRequest); i {
case 0:
return &v.state
case 1:
@@ -6104,7 +7126,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicSubscriptionsResponse); i {
+ switch v := v.(*ListTopicsResponse); i {
case 0:
return &v.state
case 1:
@@ -6116,7 +7138,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicSnapshotsRequest); i {
+ switch v := v.(*ListTopicSubscriptionsRequest); i {
case 0:
return &v.state
case 1:
@@ -6128,7 +7150,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*ListTopicSnapshotsResponse); i {
+ switch v := v.(*ListTopicSubscriptionsResponse); i {
case 0:
return &v.state
case 1:
@@ -6140,7 +7162,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteTopicRequest); i {
+ switch v := v.(*ListTopicSnapshotsRequest); i {
case 0:
return &v.state
case 1:
@@ -6152,7 +7174,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*DetachSubscriptionRequest); i {
+ switch v := v.(*ListTopicSnapshotsResponse); i {
case 0:
return &v.state
case 1:
@@ -6164,7 +7186,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*DetachSubscriptionResponse); i {
+ switch v := v.(*DeleteTopicRequest); i {
case 0:
return &v.state
case 1:
@@ -6176,7 +7198,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*Subscription); i {
+ switch v := v.(*DetachSubscriptionRequest); i {
case 0:
return &v.state
case 1:
@@ -6188,7 +7210,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*RetryPolicy); i {
+ switch v := v.(*DetachSubscriptionResponse); i {
case 0:
return &v.state
case 1:
@@ -6200,7 +7222,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*DeadLetterPolicy); i {
+ switch v := v.(*Subscription); i {
case 0:
return &v.state
case 1:
@@ -6212,7 +7234,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*ExpirationPolicy); i {
+ switch v := v.(*RetryPolicy); i {
case 0:
return &v.state
case 1:
@@ -6224,7 +7246,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*PushConfig); i {
+ switch v := v.(*DeadLetterPolicy); i {
case 0:
return &v.state
case 1:
@@ -6236,7 +7258,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*BigQueryConfig); i {
+ switch v := v.(*ExpirationPolicy); i {
case 0:
return &v.state
case 1:
@@ -6248,7 +7270,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*CloudStorageConfig); i {
+ switch v := v.(*PushConfig); i {
case 0:
return &v.state
case 1:
@@ -6260,7 +7282,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ReceivedMessage); i {
+ switch v := v.(*BigQueryConfig); i {
case 0:
return &v.state
case 1:
@@ -6272,7 +7294,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*GetSubscriptionRequest); i {
+ switch v := v.(*CloudStorageConfig); i {
case 0:
return &v.state
case 1:
@@ -6284,7 +7306,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateSubscriptionRequest); i {
+ switch v := v.(*ReceivedMessage); i {
case 0:
return &v.state
case 1:
@@ -6296,7 +7318,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*ListSubscriptionsRequest); i {
+ switch v := v.(*GetSubscriptionRequest); i {
case 0:
return &v.state
case 1:
@@ -6308,7 +7330,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*ListSubscriptionsResponse); i {
+ switch v := v.(*UpdateSubscriptionRequest); i {
case 0:
return &v.state
case 1:
@@ -6320,7 +7342,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteSubscriptionRequest); i {
+ switch v := v.(*ListSubscriptionsRequest); i {
case 0:
return &v.state
case 1:
@@ -6332,7 +7354,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*ModifyPushConfigRequest); i {
+ switch v := v.(*ListSubscriptionsResponse); i {
case 0:
return &v.state
case 1:
@@ -6344,7 +7366,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*PullRequest); i {
+ switch v := v.(*DeleteSubscriptionRequest); i {
case 0:
return &v.state
case 1:
@@ -6356,7 +7378,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*PullResponse); i {
+ switch v := v.(*ModifyPushConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -6368,7 +7390,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[34].Exporter = func(v any, i int) any {
- switch v := v.(*ModifyAckDeadlineRequest); i {
+ switch v := v.(*PullRequest); i {
case 0:
return &v.state
case 1:
@@ -6380,7 +7402,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[35].Exporter = func(v any, i int) any {
- switch v := v.(*AcknowledgeRequest); i {
+ switch v := v.(*PullResponse); i {
case 0:
return &v.state
case 1:
@@ -6392,7 +7414,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[36].Exporter = func(v any, i int) any {
- switch v := v.(*StreamingPullRequest); i {
+ switch v := v.(*ModifyAckDeadlineRequest); i {
case 0:
return &v.state
case 1:
@@ -6404,7 +7426,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[37].Exporter = func(v any, i int) any {
- switch v := v.(*StreamingPullResponse); i {
+ switch v := v.(*AcknowledgeRequest); i {
case 0:
return &v.state
case 1:
@@ -6416,7 +7438,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[38].Exporter = func(v any, i int) any {
- switch v := v.(*CreateSnapshotRequest); i {
+ switch v := v.(*StreamingPullRequest); i {
case 0:
return &v.state
case 1:
@@ -6428,7 +7450,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[39].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateSnapshotRequest); i {
+ switch v := v.(*StreamingPullResponse); i {
case 0:
return &v.state
case 1:
@@ -6440,7 +7462,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[40].Exporter = func(v any, i int) any {
- switch v := v.(*Snapshot); i {
+ switch v := v.(*CreateSnapshotRequest); i {
case 0:
return &v.state
case 1:
@@ -6452,7 +7474,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[41].Exporter = func(v any, i int) any {
- switch v := v.(*GetSnapshotRequest); i {
+ switch v := v.(*UpdateSnapshotRequest); i {
case 0:
return &v.state
case 1:
@@ -6464,7 +7486,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[42].Exporter = func(v any, i int) any {
- switch v := v.(*ListSnapshotsRequest); i {
+ switch v := v.(*Snapshot); i {
case 0:
return &v.state
case 1:
@@ -6476,7 +7498,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[43].Exporter = func(v any, i int) any {
- switch v := v.(*ListSnapshotsResponse); i {
+ switch v := v.(*GetSnapshotRequest); i {
case 0:
return &v.state
case 1:
@@ -6488,7 +7510,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[44].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteSnapshotRequest); i {
+ switch v := v.(*ListSnapshotsRequest); i {
case 0:
return &v.state
case 1:
@@ -6500,7 +7522,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[45].Exporter = func(v any, i int) any {
- switch v := v.(*SeekRequest); i {
+ switch v := v.(*ListSnapshotsResponse); i {
case 0:
return &v.state
case 1:
@@ -6512,7 +7534,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[46].Exporter = func(v any, i int) any {
- switch v := v.(*SeekResponse); i {
+ switch v := v.(*DeleteSnapshotRequest); i {
case 0:
return &v.state
case 1:
@@ -6524,6 +7546,30 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[47].Exporter = func(v any, i int) any {
+ switch v := v.(*SeekRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[48].Exporter = func(v any, i int) any {
+ switch v := v.(*SeekResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[49].Exporter = func(v any, i int) any {
switch v := v.(*IngestionDataSourceSettings_AwsKinesis); i {
case 0:
return &v.state
@@ -6535,8 +7581,20 @@ func file_google_pubsub_v1_pubsub_proto_init() {
return nil
}
}
+ file_google_pubsub_v1_pubsub_proto_msgTypes[50].Exporter = func(v any, i int) any {
+ switch v := v.(*IngestionDataSourceSettings_CloudStorage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
file_google_pubsub_v1_pubsub_proto_msgTypes[51].Exporter = func(v any, i int) any {
- switch v := v.(*PushConfig_OidcToken); i {
+ switch v := v.(*IngestionDataSourceSettings_CloudStorage_TextFormat); i {
case 0:
return &v.state
case 1:
@@ -6548,7 +7606,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[52].Exporter = func(v any, i int) any {
- switch v := v.(*PushConfig_PubsubWrapper); i {
+ switch v := v.(*IngestionDataSourceSettings_CloudStorage_AvroFormat); i {
case 0:
return &v.state
case 1:
@@ -6560,7 +7618,19 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[53].Exporter = func(v any, i int) any {
- switch v := v.(*PushConfig_NoWrapper); i {
+ switch v := v.(*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[54].Exporter = func(v any, i int) any {
+ switch v := v.(*IngestionFailureEvent_ApiViolationReason); i {
case 0:
return &v.state
case 1:
@@ -6572,7 +7642,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[55].Exporter = func(v any, i int) any {
- switch v := v.(*CloudStorageConfig_TextConfig); i {
+ switch v := v.(*IngestionFailureEvent_AvroFailureReason); i {
case 0:
return &v.state
case 1:
@@ -6584,6 +7654,78 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
}
file_google_pubsub_v1_pubsub_proto_msgTypes[56].Exporter = func(v any, i int) any {
+ switch v := v.(*IngestionFailureEvent_CloudStorageFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[59].Exporter = func(v any, i int) any {
+ switch v := v.(*Subscription_AnalyticsHubSubscriptionInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[61].Exporter = func(v any, i int) any {
+ switch v := v.(*PushConfig_OidcToken); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[62].Exporter = func(v any, i int) any {
+ switch v := v.(*PushConfig_PubsubWrapper); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[63].Exporter = func(v any, i int) any {
+ switch v := v.(*PushConfig_NoWrapper); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[65].Exporter = func(v any, i int) any {
+ switch v := v.(*CloudStorageConfig_TextConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[66].Exporter = func(v any, i int) any {
switch v := v.(*CloudStorageConfig_AvroConfig); i {
case 0:
return &v.state
@@ -6595,7 +7737,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
return nil
}
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[57].Exporter = func(v any, i int) any {
+ file_google_pubsub_v1_pubsub_proto_msgTypes[67].Exporter = func(v any, i int) any {
switch v := v.(*StreamingPullResponse_AcknowledgeConfirmation); i {
case 0:
return &v.state
@@ -6607,7 +7749,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
return nil
}
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[58].Exporter = func(v any, i int) any {
+ file_google_pubsub_v1_pubsub_proto_msgTypes[68].Exporter = func(v any, i int) any {
switch v := v.(*StreamingPullResponse_ModifyAckDeadlineConfirmation); i {
case 0:
return &v.state
@@ -6619,7 +7761,7 @@ func file_google_pubsub_v1_pubsub_proto_init() {
return nil
}
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[59].Exporter = func(v any, i int) any {
+ file_google_pubsub_v1_pubsub_proto_msgTypes[69].Exporter = func(v any, i int) any {
switch v := v.(*StreamingPullResponse_SubscriptionProperties); i {
case 0:
return &v.state
@@ -6634,27 +7776,41 @@ func file_google_pubsub_v1_pubsub_proto_init() {
}
file_google_pubsub_v1_pubsub_proto_msgTypes[2].OneofWrappers = []any{
(*IngestionDataSourceSettings_AwsKinesis_)(nil),
+ (*IngestionDataSourceSettings_CloudStorage_)(nil),
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[4].OneofWrappers = []any{
+ (*IngestionFailureEvent_CloudStorageFailure_)(nil),
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[22].OneofWrappers = []any{
+ file_google_pubsub_v1_pubsub_proto_msgTypes[24].OneofWrappers = []any{
(*PushConfig_OidcToken_)(nil),
(*PushConfig_PubsubWrapper_)(nil),
(*PushConfig_NoWrapper_)(nil),
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[24].OneofWrappers = []any{
+ file_google_pubsub_v1_pubsub_proto_msgTypes[26].OneofWrappers = []any{
(*CloudStorageConfig_TextConfig_)(nil),
(*CloudStorageConfig_AvroConfig_)(nil),
}
- file_google_pubsub_v1_pubsub_proto_msgTypes[45].OneofWrappers = []any{
+ file_google_pubsub_v1_pubsub_proto_msgTypes[47].OneofWrappers = []any{
(*SeekRequest_Time)(nil),
(*SeekRequest_Snapshot)(nil),
}
+ file_google_pubsub_v1_pubsub_proto_msgTypes[50].OneofWrappers = []any{
+ (*IngestionDataSourceSettings_CloudStorage_TextFormat_)(nil),
+ (*IngestionDataSourceSettings_CloudStorage_AvroFormat_)(nil),
+ (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat)(nil),
+ }
+ file_google_pubsub_v1_pubsub_proto_msgTypes[51].OneofWrappers = []any{}
+ file_google_pubsub_v1_pubsub_proto_msgTypes[56].OneofWrappers = []any{
+ (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason)(nil),
+ (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason)(nil),
+ }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc,
- NumEnums: 5,
- NumMessages: 62,
+ NumEnums: 7,
+ NumMessages: 72,
NumExtensions: 0,
NumServices: 2,
},
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go
index 4013a77e9e4bc..18ea325b85289 100644
--- a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go
@@ -69,6 +69,7 @@ func defaultSchemaGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -572,6 +573,7 @@ func defaultSchemaRESTClientOptions() []option.ClientOption {
internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
}
}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
index 65d3ce16c3b2f..6b673129277ab 100644
--- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
@@ -76,6 +76,7 @@ func defaultSubscriberGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -876,6 +877,7 @@ func defaultSubscriberRESTClientOptions() []option.ClientOption {
internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
}
}
diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go
index f37b860085c8d..e3f747054984b 100644
--- a/vendor/cloud.google.com/go/pubsub/internal/version.go
+++ b/vendor/cloud.google.com/go/pubsub/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.42.0"
+const Version = "1.45.0"
diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go
index 9f60359040fbb..4f791fa6c06ae 100644
--- a/vendor/cloud.google.com/go/pubsub/iterator.go
+++ b/vendor/cloud.google.com/go/pubsub/iterator.go
@@ -335,14 +335,17 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) {
if m.Attributes != nil {
ctx = propagation.TraceContext{}.Extract(ctx, newMessageCarrier(m))
}
- attr := getSubscriberOpts(it.projectID, it.subID, m)
- _, span := startSpan(ctx, subscribeSpanName, it.subID, attr...)
- span.SetAttributes(
- attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery),
- attribute.String(ackIDAttribute, ackID),
- semconv.MessagingBatchMessageCount(len(msgs)),
- semconv.CodeFunction("receive"),
+ opts := getSubscriberOpts(it.projectID, it.subID, m)
+ opts = append(
+ opts,
+ trace.WithAttributes(
+ attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery),
+ attribute.String(ackIDAttribute, ackID),
+ semconv.MessagingBatchMessageCount(len(msgs)),
+ semconv.CodeFunction("receive"),
+ ),
)
+ _, span := startSpan(ctx, subscribeSpanName, it.subID, opts...)
// Always store the subscribe span, even if sampling isn't enabled.
// This is useful since we need to propagate the sampling flag
// to the callback in Receive, so traces have an unbroken sampling decision.
@@ -658,11 +661,16 @@ func (it *messageIterator) sendAck(m map[string]*AckResult) {
// Create the single ack span for this request, and for each
// message, add Subscribe<->Ack links.
opts := getCommonOptions(it.projectID, it.subID)
- opts = append(opts, trace.WithLinks(links...))
+ opts = append(
+ opts,
+ trace.WithLinks(links...),
+ trace.WithAttributes(
+ semconv.MessagingBatchMessageCount(len(ackIDs)),
+ semconv.CodeFunction("sendAck"),
+ ),
+ )
_, ackSpan := startSpan(context.Background(), ackSpanName, it.subID, opts...)
defer ackSpan.End()
- ackSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)),
- semconv.CodeFunction("sendAck"))
if ackSpan.SpanContext().IsSampled() {
for _, s := range subscribeSpans {
s.AddLink(trace.Link{
@@ -740,16 +748,25 @@ func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Dur
// Create the single modack/nack span for this request, and for each
// message, add Subscribe<->Modack links.
opts := getCommonOptions(it.projectID, it.subID)
- opts = append(opts, trace.WithLinks(links...))
- _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...)
- defer mSpan.End()
+ opts = append(
+ opts,
+ trace.WithLinks(links...),
+ trace.WithAttributes(
+ semconv.MessagingBatchMessageCount(len(ackIDs)),
+ semconv.CodeFunction("sendModAck"),
+ ),
+ )
if !isNack {
- mSpan.SetAttributes(
- semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)),
- attribute.Bool(receiptModackAttribute, isReceipt))
+ opts = append(
+ opts,
+ trace.WithAttributes(
+ semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)),
+ attribute.Bool(receiptModackAttribute, isReceipt),
+ ),
+ )
}
- mSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)),
- semconv.CodeFunction("sendModAck"))
+ _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...)
+ defer mSpan.End()
if mSpan.SpanContext().IsSampled() {
for _, s := range subscribeSpans {
s.AddLink(trace.Link{
diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go
index c5ea8f510af82..231e5a64ada9d 100644
--- a/vendor/cloud.google.com/go/pubsub/pullstream.go
+++ b/vendor/cloud.google.com/go/pubsub/pullstream.go
@@ -31,8 +31,9 @@ import (
// the stream on a retryable error.
type pullStream struct {
ctx context.Context
- open func() (pb.Subscriber_StreamingPullClient, error)
- cancel context.CancelFunc
+ cancel context.CancelFunc // cancel function of the context above
+ open func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error)
+ close context.CancelFunc // cancel function to close down the currently open stream
mu sync.Mutex
spc *pb.Subscriber_StreamingPullClient
@@ -50,8 +51,9 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName
return &pullStream{
ctx: ctx,
cancel: cancel,
- open: func() (pb.Subscriber_StreamingPullClient, error) {
- spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
+ open: func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) {
+ sctx, close := context.WithCancel(ctx)
+ spc, err := streamingPull(sctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
if err == nil {
recordStat(ctx, StreamRequestCount, 1)
streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second)
@@ -69,9 +71,10 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName
})
}
if err != nil {
- return nil, err
+ close()
+ return nil, nil, err
}
- return spc, nil
+ return spc, close, nil
},
}
}
@@ -100,29 +103,33 @@ func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber
if spc != s.spc {
return s.spc, nil
}
+ // we are about to open a new stream: if necessary, make sure the previous one is closed
+ if s.close != nil {
+ s.close()
+ }
// Either this is the very first call on this stream (s.spc == nil), or we have a valid
// retry request. Either way, open a new stream.
// The lock is held here for a long time, but it doesn't matter because no callers could get
// anything done anyway.
s.spc = new(pb.Subscriber_StreamingPullClient)
- *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent.
+ *s.spc, s.close, s.err = s.openWithRetry() // Any error from openWithRetry is permanent.
return s.spc, s.err
}
-func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) {
+func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) {
r := defaultRetryer{}
for {
recordStat(s.ctx, StreamOpenCount, 1)
- spc, err := s.open()
+ spc, close, err := s.open()
bo, shouldRetry := r.Retry(err)
if err != nil && shouldRetry {
recordStat(s.ctx, StreamRetryCount, 1)
if err := gax.Sleep(s.ctx, bo); err != nil {
- return nil, err
+ return nil, nil, err
}
continue
}
- return spc, err
+ return spc, close, err
}
}
diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go
index 1991fa7f03ce8..bb916f5290565 100644
--- a/vendor/cloud.google.com/go/pubsub/topic.go
+++ b/vendor/cloud.google.com/go/pubsub/topic.go
@@ -44,6 +44,7 @@ import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
fmpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
const (
@@ -350,6 +351,9 @@ type TopicConfigToUpdate struct {
// IngestionDataSourceSettings are settings for ingestion from a
// data source into this topic.
//
+ // When changing this value, the entire data source settings object must be applied,
+ // rather than just the differences. This includes the source and logging settings.
+ //
// Use the zero value &IngestionDataSourceSettings{} to remove the ingestion settings from the topic.
IngestionDataSourceSettings *IngestionDataSourceSettings
}
@@ -425,6 +429,8 @@ func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePo
// IngestionDataSourceSettings enables ingestion from a data source into this topic.
type IngestionDataSourceSettings struct {
Source IngestionDataSource
+
+ PlatformLogsSettings *PlatformLogsSettings
}
// IngestionDataSource is the kind of ingestion source to be used.
@@ -495,6 +501,97 @@ func (i *IngestionDataSourceAWSKinesis) isIngestionDataSource() bool {
return true
}
+// CloudStorageIngestionState denotes the possible states for ingestion from Cloud Storage.
+type CloudStorageIngestionState int
+
+const (
+ // CloudStorageIngestionStateUnspecified is the default value. This value is unused.
+ CloudStorageIngestionStateUnspecified = iota
+
+ // CloudStorageIngestionStateActive means ingestion is active.
+ CloudStorageIngestionStateActive
+
+ // CloudStorageIngestionPermissionDenied means encountering an error while calling the Cloud Storage API.
+ // This can happen if the Pub/Sub SA has not been granted the
+ // [appropriate permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions):
+ // - storage.objects.list: to list the objects in a bucket.
+ // - storage.objects.get: to read the objects in a bucket.
+ // - storage.buckets.get: to verify the bucket exists.
+ CloudStorageIngestionPermissionDenied
+
+ // CloudStorageIngestionPublishPermissionDenied means encountering an error when publishing to the topic.
+ // This can happen if the Pub/Sub SA has not been granted the [appropriate publish
+ // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher)
+ CloudStorageIngestionPublishPermissionDenied
+
+ // CloudStorageIngestionBucketNotFound means the provided bucket doesn't exist.
+ CloudStorageIngestionBucketNotFound
+
+ // CloudStorageIngestionTooManyObjects means the bucket has too many objects, ingestion will be paused.
+ CloudStorageIngestionTooManyObjects
+)
+
+// IngestionDataSourceCloudStorage are ingestion settings for Cloud Storage.
+type IngestionDataSourceCloudStorage struct {
+ // State is an output-only field indicating the state of the Cloud storage ingestion source.
+ State CloudStorageIngestionState
+
+ // Bucket is the Cloud Storage bucket. The bucket name must be without any
+ // prefix like "gs://". See the bucket naming requirements (https://cloud.google.com/storage/docs/buckets#naming).
+ Bucket string
+
+ // InputFormat is the format of objects in Cloud Storage.
+ // Defaults to TextFormat.
+ InputFormat ingestionDataSourceCloudStorageInputFormat
+
+ // MinimumObjectCreateTime means objects with a larger or equal creation timestamp will be
+ // ingested.
+ MinimumObjectCreateTime time.Time
+
+ // MatchGlob is the pattern used to match objects that will be ingested. If
+ // empty, all objects will be ingested. See the [supported
+ // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob).
+ MatchGlob string
+}
+
+var _ IngestionDataSource = (*IngestionDataSourceCloudStorage)(nil)
+
+func (i *IngestionDataSourceCloudStorage) isIngestionDataSource() bool {
+ return true
+}
+
+type ingestionDataSourceCloudStorageInputFormat interface {
+ isCloudStorageIngestionInputFormat() bool
+}
+
+var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageTextFormat)(nil)
+var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageAvroFormat)(nil)
+var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStoragePubSubAvroFormat)(nil)
+
+// IngestionDataSourceCloudStorageTextFormat means Cloud Storage data will be interpreted as text.
+type IngestionDataSourceCloudStorageTextFormat struct {
+ Delimiter string
+}
+
+func (i *IngestionDataSourceCloudStorageTextFormat) isCloudStorageIngestionInputFormat() bool {
+ return true
+}
+
+// IngestionDataSourceCloudStorageAvroFormat means Cloud Storage data will be interpreted in Avro format.
+type IngestionDataSourceCloudStorageAvroFormat struct{}
+
+func (i *IngestionDataSourceCloudStorageAvroFormat) isCloudStorageIngestionInputFormat() bool {
+ return true
+}
+
+// IngestionDataSourceCloudStoragePubSubAvroFormat is used assuming the data was written using Cloud
+// Storage subscriptions https://cloud.google.com/pubsub/docs/cloudstorage.
+type IngestionDataSourceCloudStoragePubSubAvroFormat struct{}
+
+func (i *IngestionDataSourceCloudStoragePubSubAvroFormat) isCloudStorageIngestionInputFormat() bool {
+ return true
+}
+
func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *IngestionDataSourceSettings {
if pbs == nil {
return nil
@@ -509,7 +606,33 @@ func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *In
AWSRoleARN: k.GetAwsRoleArn(),
GCPServiceAccount: k.GetGcpServiceAccount(),
}
+ } else if cs := pbs.GetCloudStorage(); cs != nil {
+ var format ingestionDataSourceCloudStorageInputFormat
+ switch t := cs.InputFormat.(type) {
+ case *pb.IngestionDataSourceSettings_CloudStorage_TextFormat_:
+ format = &IngestionDataSourceCloudStorageTextFormat{
+ Delimiter: *t.TextFormat.Delimiter,
+ }
+ case *pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_:
+ format = &IngestionDataSourceCloudStorageAvroFormat{}
+ case *pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat:
+ format = &IngestionDataSourceCloudStoragePubSubAvroFormat{}
+ }
+ s.Source = &IngestionDataSourceCloudStorage{
+ State: CloudStorageIngestionState(cs.GetState()),
+ Bucket: cs.GetBucket(),
+ InputFormat: format,
+ MinimumObjectCreateTime: cs.GetMinimumObjectCreateTime().AsTime(),
+ MatchGlob: cs.GetMatchGlob(),
+ }
+ }
+
+ if pbs.PlatformLogsSettings != nil {
+ s.PlatformLogsSettings = &PlatformLogsSettings{
+ Severity: PlatformLogsSeverity(pbs.PlatformLogsSettings.Severity),
+ }
}
+
return s
}
@@ -522,6 +645,11 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings
return nil
}
pbs := &pb.IngestionDataSourceSettings{}
+ if i.PlatformLogsSettings != nil {
+ pbs.PlatformLogsSettings = &pb.PlatformLogsSettings{
+ Severity: pb.PlatformLogsSettings_Severity(i.PlatformLogsSettings.Severity),
+ }
+ }
if out := i.Source; out != nil {
if k, ok := out.(*IngestionDataSourceAWSKinesis); ok {
pbs.Source = &pb.IngestionDataSourceSettings_AwsKinesis_{
@@ -534,10 +662,76 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings
},
}
}
+ if cs, ok := out.(*IngestionDataSourceCloudStorage); ok {
+ switch format := cs.InputFormat.(type) {
+ case *IngestionDataSourceCloudStorageTextFormat:
+ pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{
+ CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{
+ State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State),
+ Bucket: cs.Bucket,
+ InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat_{
+ TextFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat{
+ Delimiter: &format.Delimiter,
+ },
+ },
+ MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime),
+ MatchGlob: cs.MatchGlob,
+ },
+ }
+ case *IngestionDataSourceCloudStorageAvroFormat:
+ pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{
+ CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{
+ State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State),
+ Bucket: cs.Bucket,
+ InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_{
+ AvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat{},
+ },
+ MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime),
+ MatchGlob: cs.MatchGlob,
+ },
+ }
+ case *IngestionDataSourceCloudStoragePubSubAvroFormat:
+ pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{
+ CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{
+ State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State),
+ Bucket: cs.Bucket,
+ InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat{
+ PubsubAvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{},
+ },
+ MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime),
+ MatchGlob: cs.MatchGlob,
+ },
+ }
+ }
+ }
}
return pbs
}
+// PlatformLogsSettings configures logging produced by Pub/Sub.
+// Currently only valid on Cloud Storage ingestion topics.
+type PlatformLogsSettings struct {
+ Severity PlatformLogsSeverity
+}
+
+// PlatformLogsSeverity are the severity levels of Platform Logs.
+type PlatformLogsSeverity int32
+
+const (
+ // PlatformLogsSeverityUnspecified is the default value. Logs level is unspecified. Logs will be disabled.
+ PlatformLogsSeverityUnspecified PlatformLogsSeverity = iota
+ // PlatformLogsSeverityDisabled means logs will be disabled.
+ PlatformLogsSeverityDisabled
+ // PlatformLogsSeverityDebug means debug logs and higher-severity logs will be written.
+ PlatformLogsSeverityDebug
+ // PlatformLogsSeverityInfo means info logs and higher-severity logs will be written.
+ PlatformLogsSeverityInfo
+ // PlatformLogsSeverityWarning means warning logs and higher-severity logs will be written.
+ PlatformLogsSeverityWarning
+ // PlatformLogsSeverityError means only error logs will be written.
+ PlatformLogsSeverityError
+)
+
// Config returns the TopicConfig for the topic.
func (t *Topic) Config(ctx context.Context) (TopicConfig, error) {
pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
@@ -748,8 +942,8 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
var createSpan trace.Span
if t.enableTracing {
opts := getPublishSpanAttributes(t.c.projectID, t.ID(), msg)
+ opts = append(opts, trace.WithAttributes(semconv.CodeFunction("Publish")))
ctx, createSpan = startSpan(ctx, createSpanName, t.ID(), opts...)
- createSpan.SetAttributes(semconv.CodeFunction("Publish"))
}
ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name))
if err != nil {
@@ -973,8 +1167,14 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage)
opts := getCommonOptions(projectID, topicID)
// Add link to publish RPC span of createSpan(s).
opts = append(opts, trace.WithLinks(links...))
+ opts = append(
+ opts,
+ trace.WithAttributes(
+ semconv.MessagingBatchMessageCount(numMsgs),
+ semconv.CodeFunction("publishMessageBundle"),
+ ),
+ )
ctx, pSpan = startSpan(ctx, publishRPCSpanName, topicID, opts...)
- pSpan.SetAttributes(semconv.MessagingBatchMessageCount(numMsgs), semconv.CodeFunction("publishMessageBundle"))
defer pSpan.End()
// Add the reverse link to createSpan(s) of publish RPC span.
diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go
index 1d41e9d894221..51112bb50f5f3 100644
--- a/vendor/cloud.google.com/go/pubsub/trace.go
+++ b/vendor/cloud.google.com/go/pubsub/trace.go
@@ -20,6 +20,7 @@ import (
"log"
"sync"
+ pb "cloud.google.com/go/pubsub/apiv1/pubsubpb"
"cloud.google.com/go/pubsub/internal"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
@@ -273,33 +274,42 @@ func tracer() trace.Tracer {
var _ propagation.TextMapCarrier = (*messageCarrier)(nil)
-// messageCarrier injects and extracts traces from a pubsub.Message.
+// messageCarrier injects and extracts traces from pubsub.Message attributes.
type messageCarrier struct {
- msg *Message
+ attributes map[string]string
}
const googclientPrefix string = "googclient_"
// newMessageCarrier creates a new PubsubMessageCarrier.
func newMessageCarrier(msg *Message) messageCarrier {
- return messageCarrier{msg: msg}
+ return messageCarrier{attributes: msg.Attributes}
+}
+
+// NewMessageCarrierFromPB creates a propagation.TextMapCarrier that can be used to extract the trace
+// context from a protobuf PubsubMessage.
+//
+// Example:
+// ctx = propagation.TraceContext{}.Extract(ctx, pubsub.NewMessageCarrierFromPB(msg))
+func NewMessageCarrierFromPB(msg *pb.PubsubMessage) propagation.TextMapCarrier {
+ return messageCarrier{attributes: msg.Attributes}
}
// Get retrieves a single value for a given key.
func (c messageCarrier) Get(key string) string {
- return c.msg.Attributes[googclientPrefix+key]
+ return c.attributes[googclientPrefix+key]
}
// Set sets an attribute.
func (c messageCarrier) Set(key, val string) {
- c.msg.Attributes[googclientPrefix+key] = val
+ c.attributes[googclientPrefix+key] = val
}
// Keys returns a slice of all keys in the carrier.
func (c messageCarrier) Keys() []string {
i := 0
- out := make([]string, len(c.msg.Attributes))
- for k := range c.msg.Attributes {
+ out := make([]string, len(c.attributes))
+ for k := range c.attributes {
out[i] = k
i++
}
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
index 1e924a8340e69..73021df5391d6 100644
--- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -27,6 +27,9 @@
"apigeeregistry": {
"component": "apigeeregistry"
},
+ "apihub": {
+ "component": "apihub"
+ },
"apikeys": {
"component": "apikeys"
},
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index e9fb55585b97c..0e2e2e6948f85 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,51 @@
# Changes
+## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03)
+
+
+### Features
+
+* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f))
+* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2))
+* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f))
+* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545))
+* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8))
+* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb))
+* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88))
+* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b))
+* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361)
+* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34))
+* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8))
+* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c))
+* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567)
+* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978))
+* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d))
+* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0))
+* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2))
+* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+
+### Performance Improvements
+
+* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d))
+
+
+### Documentation
+
+* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+
## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03)
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index d582a60d0e837..3eded017831eb 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -416,6 +416,10 @@ type BucketAttrs struct {
// This field is read-only.
Created time.Time
+ // Updated is the time at which the bucket was last modified.
+ // This field is read-only.
+ Updated time.Time
+
// VersioningEnabled reports whether this bucket has versioning enabled.
VersioningEnabled bool
@@ -824,6 +828,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
+ Updated: convertTime(b.Updated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
@@ -861,6 +866,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
StorageClass: b.GetStorageClass(),
Created: b.GetCreateTime().AsTime(),
+ Updated: b.GetUpdateTime().AsTime(),
VersioningEnabled: b.GetVersioning().GetEnabled(),
ACL: toBucketACLRulesFromProto(b.GetAcl()),
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index bbe89276a432c..aebba22517579 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -122,7 +122,7 @@ type settings struct {
gax []gax.CallOption
// idempotent indicates if the call is idempotent or not when considering
- // if the call should be retired or not.
+ // if the call should be retried or not.
idempotent bool
// clientOption is a set of option.ClientOption to be used during client
@@ -132,6 +132,8 @@ type settings struct {
// userProject is the user project that should be billed for the request.
userProject string
+
+ metricsContext *metricsContext
}
func initSettings(opts ...storageOption) *settings {
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index c274c762ea4ec..4fcfb7326487b 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -331,14 +331,14 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)
-# Experimental gRPC API
+# gRPC API
-This package includes support for the Cloud Storage gRPC API, which is currently
-in preview. This implementation uses gRPC rather than the current JSON & XML
-APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC
-team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to
-allowlist to access this API. The Go Storage gRPC library is not yet generally
-available, so it may be subject to breaking changes.
+This package includes support for the Cloud Storage gRPC API. The
+implementation uses gRPC rather than the Default
+JSON & XML APIs to make requests to Cloud Storage.
+The Go Storage gRPC client is generally available.
+The Notifications, Serivce Account HMAC
+and GetServiceAccount RPCs are not supported through the gRPC client.
To create a client which will use gRPC, use the alternate constructor:
@@ -349,15 +349,43 @@ To create a client which will use gRPC, use the alternate constructor:
}
// Use client as usual.
-If the application is running within GCP, users may get better performance by
-enabling Direct Google Access (enabling requests to skip some proxy steps). To enable,
-set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
-the following side-effect imports to your application:
+Using the gRPC API inside GCP with a bucket in the same region can allow for
+[Direct Connectivity] (enabling requests to skip some proxy steps and reducing
+response latency). A warning is emmitted if gRPC is not used within GCP to
+warn that Direct Connectivity could not be initialized. Direct Connectivity
+is not required to access the gRPC API.
- import (
- _ "google.golang.org/grpc/balancer/rls"
- _ "google.golang.org/grpc/xds/googledirectpath"
- )
+Dependencies for the gRPC API may slightly increase the size of binaries for
+applications depending on this package. If you are not using gRPC, you can use
+the build tag `disable_grpc_modules` to opt out of these dependencies and
+reduce the binary size.
+
+The gRPC client emits metrics by default and will export the
+gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+[Google Cloud Monitoring]. The metrics are accessible through Cloud Monitoring
+API and you incur no additional cost for publishing the metrics. Google Cloud
+Support can use this information to more quickly diagnose problems related to
+GCS and gRPC.
+Sending this data does not incur any billing charges, and requires minimal
+CPU (a single RPC every minute) or memory (a few KiB to batch the
+telemetry).
+
+To access the metrics you can view them through Cloud Monitoring
+[metric explorer] with the prefix `storage.googleapis.com/client`. Metrics are emitted
+every minute.
+
+You can disable metrics using the following example when creating a new gRPC
+client using [WithDisabledClientMetrics].
+
+The metrics exporter uses Cloud Monitoring API which determines
+project ID and credentials doing the following:
+
+* Project ID is determined using OTel Resource Detector for the environment
+otherwise it falls back to the project provided by [google.FindCredentials].
+
+* Credentials are determined using [Application Default Credentials]. The
+principal must have `roles/monitoring.metricWriter` role granted. If not a
+logged warning will be emitted. Subsequent are silenced to prevent noisy logs.
# Storage Control API
@@ -366,6 +394,11 @@ and Managed Folder operations) are supported via the autogenerated Storage Contr
client, which is available as a subpackage in this module. See package docs at
[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
+[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials
+[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials
+[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
@@ -375,5 +408,7 @@ client, which is available as a subpackage in this module. See package docs at
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
+[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer
+[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity
*/
package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go
new file mode 100644
index 0000000000000..5d4c42fb82bfc
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go
@@ -0,0 +1,154 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+)
+
+// dynamicDelay dynamically calculates the delay at a fixed percentile, based on
+// delay samples.
+//
+// dynamicDelay is goroutine-safe.
+type dynamicDelay struct {
+ increaseFactor float64
+ decreaseFactor float64
+ minDelay time.Duration
+ maxDelay time.Duration
+ value time.Duration
+
+ // Guards the value
+ mu *sync.RWMutex
+}
+
+// NewDynamicDelay returns a dynamicDelay.
+//
+// targetPercentile is the desired percentile to be computed. For example, a
+// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be
+// in the range [0, 1].
+//
+// increaseRate (must be > 0) determines how many increase calls it takes for
+// Value to double.
+//
+// initialDelay is the start value of the delay.
+//
+// decrease can never lower the delay past minDelay, increase can never raise
+// the delay past maxDelay.
+func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*dynamicDelay, error) {
+ if targetPercentile < 0 || targetPercentile > 1 {
+ return nil, fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile)
+ }
+ if increaseRate <= 0 {
+ return nil, fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate)
+ }
+ if minDelay >= maxDelay {
+ return nil, fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay)
+ }
+ if initialDelay < minDelay {
+ initialDelay = minDelay
+ }
+ if initialDelay > maxDelay {
+ initialDelay = maxDelay
+ }
+
+ // Compute increaseFactor and decreaseFactor such that:
+ // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1
+ increaseFactor := math.Exp(math.Log(2) / increaseRate)
+ if increaseFactor < 1.001 {
+ increaseFactor = 1.001
+ }
+ decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile)
+ if decreaseFactor > 0.9999 {
+ decreaseFactor = 0.9999
+ }
+
+ return &dynamicDelay{
+ increaseFactor: increaseFactor,
+ decreaseFactor: decreaseFactor,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ value: initialDelay,
+ mu: &sync.RWMutex{},
+ }, nil
+}
+
+func (d *dynamicDelay) unsafeIncrease() {
+ v := time.Duration(float64(d.value) * d.increaseFactor)
+ if v > d.maxDelay {
+ d.value = d.maxDelay
+ } else {
+ d.value = v
+ }
+}
+
+// increase notes that the operation took longer than the delay returned by Value.
+func (d *dynamicDelay) increase() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeIncrease()
+}
+
+func (d *dynamicDelay) unsafeDecrease() {
+ v := time.Duration(float64(d.value) * d.decreaseFactor)
+ if v < d.minDelay {
+ d.value = d.minDelay
+ } else {
+ d.value = v
+ }
+}
+
+// decrease notes that the operation completed before the delay returned by getValue.
+func (d *dynamicDelay) decrease() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeDecrease()
+}
+
+// update updates the delay value depending on the specified latency.
+func (d *dynamicDelay) update(latency time.Duration) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if latency > d.value {
+ d.unsafeIncrease()
+ } else {
+ d.unsafeDecrease()
+ }
+}
+
+// getValue returns the desired delay to wait before retry the operation.
+func (d *dynamicDelay) getValue() time.Duration {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ return d.value
+}
+
+// PrintDelay prints the state of delay, helpful in debugging.
+func (d *dynamicDelay) printDelay() {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ fmt.Println("IncreaseFactor: ", d.increaseFactor)
+ fmt.Println("DecreaseFactor: ", d.decreaseFactor)
+ fmt.Println("MinDelay: ", d.minDelay)
+ fmt.Println("MaxDelay: ", d.maxDelay)
+ fmt.Println("Value: ", d.value)
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index d81a17b6b04d6..eb327a3eeb48e 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -16,11 +16,12 @@ package storage
import (
"context"
- "encoding/base64"
+ "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
+ "log"
"net/url"
"os"
@@ -36,6 +37,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protowire"
@@ -95,10 +97,11 @@ func defaultGRPCOptions() []option.ClientOption {
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
+ WithDisabledClientMetrics(),
)
} else {
// Only enable DirectPath when the emulator is not being targeted.
- defaults = append(defaults, internaloption.EnableDirectPath(true))
+ defaults = append(defaults, internaloption.EnableDirectPath(true), internaloption.EnableDirectPathXds())
}
return defaults
@@ -124,6 +127,15 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads")
}
+ if !config.disableClientMetrics {
+ // Do not fail client creation if enabling metrics fails.
+ if metricsContext, err := enableClientMetrics(ctx, s); err == nil {
+ s.metricsContext = metricsContext
+ s.clientOption = append(s.clientOption, metricsContext.clientOpts...)
+ } else {
+ log.Printf("Failed to enable client metrics: %v", err)
+ }
+ }
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
@@ -136,26 +148,17 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
}
func (c *grpcStorageClient) Close() error {
+ if c.settings.metricsContext != nil {
+ c.settings.metricsContext.close()
+ }
return c.raw.Close()
}
// Top-level methods.
+// GetServiceAccount is not supported in the gRPC client.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetServiceAccountRequest{
- Project: toProjectResource(project),
- }
- var resp *storagepb.ServiceAccount
- err := run(ctx, func(ctx context.Context) error {
- var err error
- resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- return resp.EmailAddress, err
+ return "", errMethodNotSupported
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
@@ -432,16 +435,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
MatchGlob: it.query.MatchGlob,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
SoftDeleted: it.query.SoftDeleted,
+ IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
- // IncludeFoldersAsPrefixes is not supported for gRPC
- // TODO: remove this when support is added in the proto.
- if it.query.IncludeFoldersAsPrefixes {
- return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC")
- }
var objects []*storagepb.Object
var gitr *gapic.ObjectIterator
err = run(it.ctx, func(ctx context.Context) error {
@@ -959,37 +958,48 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
-// bytesCodec is a grpc codec which permits receiving messages as either
-// protobuf messages, or as raw []bytes.
-type bytesCodec struct {
- encoding.Codec
+// Custom codec to be used for unmarshaling ReadObjectResponse messages.
+// This is used to avoid a copy of object data in proto.Unmarshal.
+type bytesCodecV2 struct {
}
-func (bytesCodec) Marshal(v any) ([]byte, error) {
+var _ encoding.CodecV2 = bytesCodecV2{}
+
+// Marshal is used to encode messages to send for bytesCodecV2. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
vv, ok := v.(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
}
-func (bytesCodec) Unmarshal(data []byte, v any) error {
+// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
switch v := v.(type) {
- case *[]byte:
- // If gRPC could recycle the data []byte after unmarshaling (through
- // buffer pools), we would need to make a copy here.
+ case *mem.BufferSlice:
*v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
return nil
case proto.Message:
- return proto.Unmarshal(data, v)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
default:
- return fmt.Errorf("can not unmarshal type %T", v)
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
}
}
-func (bytesCodec) Name() string {
- // If this isn't "", then gRPC sets the content-subtype of the call to this
- // value and we get errors.
+func (bytesCodecV2) Name() string {
return ""
}
@@ -1000,7 +1010,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
s := callSettings(c.settings, opts...)
s.gax = append(s.gax, gax.WithGRPCOptions(
- grpc.ForceCodec(bytesCodec{}),
+ grpc.ForceCodecV2(bytesCodecV2{}),
))
if s.userProject != "" {
@@ -1018,8 +1028,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
req.Generation = params.gen
}
- var databuf []byte
-
// Define a function that initiates a Read with offset and length, assuming
// we have already read seen bytes.
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
@@ -1045,18 +1053,19 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
var stream storagepb.Storage_ReadObjectClient
- var msg *storagepb.ReadObjectResponse
var err error
+ var decoder *readResponseDecoder
err = run(cc, func(ctx context.Context) error {
- stream, err = c.raw.ReadObject(cc, req, s.gax...)
+ stream, err = c.raw.ReadObject(ctx, req, s.gax...)
if err != nil {
return err
}
// Receive the message into databuf as a wire-encoded message so we can
// use a custom decoder to avoid an extra copy at the protobuf layer.
- err := stream.RecvMsg(&databuf)
+ databufs := mem.BufferSlice{}
+ err := stream.RecvMsg(&databufs)
// These types of errors show up on the Recv call, rather than the
// initialization of the stream via ReadObject above.
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
@@ -1066,22 +1075,26 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
return err
}
// Use a custom decoder that uses protobuf unmarshalling for all
- // fields except the checksummed data.
- // Subsequent receives in Read calls will skip all protobuf
- // unmarshalling and directly read the content from the gRPC []byte
- // response, since only the first call will contain other fields.
- msg, err = readFullObjectResponse(databuf)
-
+ // fields except the object data. Object data is handled separately
+ // to avoid a copy.
+ decoder = &readResponseDecoder{
+ databufs: databufs,
+ }
+ err = decoder.readFullObjectResponse()
return err
}, s.retry, s.idempotent)
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.
cancel()
+ // Free any buffers.
+ if decoder != nil && decoder.databufs != nil {
+ decoder.databufs.Free()
+ }
return nil, nil, err
}
- return &readStreamResponse{stream, msg}, cancel, nil
+ return &readStreamResponse{stream, decoder}, cancel, nil
}
res, cancel, err := reopen(0)
@@ -1091,7 +1104,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
// The first message was Recv'd on stream open, use it to populate the
// object metadata.
- msg := res.response
+ msg := res.decoder.msg
obj := msg.GetMetadata()
// This is the size of the entire object, even if only a range was requested.
size := obj.GetSize()
@@ -1101,9 +1114,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
wantCRC uint32
checkCRC bool
)
- if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
+ if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil {
+ if params.offset == 0 && params.length < 0 {
+ checkCRC = true
+ }
wantCRC = checksums.GetCrc32C()
- checkCRC = true
}
r = &Reader{
@@ -1115,18 +1130,17 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
LastModified: obj.GetUpdateTime().AsTime(),
Metageneration: obj.GetMetageneration(),
Generation: obj.GetGeneration(),
+ CRC32C: wantCRC,
},
reader: &gRPCReader{
stream: res.stream,
reopen: reopen,
cancel: cancel,
size: size,
- // Store the content from the first Recv in the
- // client buffer for reading later.
- leftovers: msg.GetChecksummedData().GetContent(),
+ // Preserve the decoder to read out object data when Read/WriteTo is called.
+ currMsg: res.decoder,
settings: s,
zeroRange: params.length == 0,
- databuf: databuf,
wantCRC: wantCRC,
checkCRC: checkCRC,
},
@@ -1293,213 +1307,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
return res.Permissions, nil
}
-// HMAC Key methods.
+// HMAC Key methods are not implemented in gRPC client.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
- s := callSettings(c.settings, opts...)
- req := &storagepb.ListHmacKeysRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- ShowDeletedKeys: showDeletedKeys,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
it := &HMACKeysIterator{
ctx: ctx,
- projectID: project,
- retry: s.retry,
+ projectID: "",
+ retry: nil,
}
- fetch := func(pageSize int, pageToken string) (token string, err error) {
- var hmacKeys []*storagepb.HmacKeyMetadata
- err = run(it.ctx, func(ctx context.Context) error {
- gitr := c.raw.ListHmacKeys(ctx, req, s.gax...)
- hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- for _, hkmd := range hmacKeys {
- hk := toHMACKeyFromProto(hkmd)
- it.hmacKeys = append(it.hmacKeys, hk)
- }
-
- return token, nil
+ fetch := func(_ int, _ string) (token string, err error) {
+ return "", errMethodNotSupported
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
- func() int { return len(it.hmacKeys) - it.index },
- func() interface{} {
- prev := it.hmacKeys
- it.hmacKeys = it.hmacKeys[:0]
- it.index = 0
- return prev
- })
+ func() int { return 0 },
+ func() interface{} { return nil },
+ )
return it
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- hk := &storagepb.HmacKeyMetadata{
- AccessId: accessID,
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- State: string(attrs.State),
- Etag: attrs.Etag,
- }
- var paths []string
- fieldMask := &fieldmaskpb.FieldMask{
- Paths: paths,
- }
- if attrs.State != "" {
- fieldMask.Paths = append(fieldMask.Paths, "state")
- }
- req := &storagepb.UpdateHmacKeyRequest{
- HmacKey: hk,
- UpdateMask: fieldMask,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateHmacKeyRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var res *storagepb.CreateHmacKeyResponse
- err := run(ctx, func(ctx context.Context) error {
- var err error
- res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- key := toHMACKeyFromProto(res.Metadata)
- key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
-
- return key, nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteHmacKey(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
-// Notification methods.
+// Notification methods are not implemented in gRPC client.
func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- req := &storagepb.ListNotificationConfigsRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- }
- var notifications []*storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...)
- for {
- // PageSize is not set and fallbacks to the API default pageSize of 100.
- items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken())
- if err != nil {
- return err
- }
- notifications = append(notifications, items...)
- // If there are no more results, nextPageToken is empty and err is nil.
- if nextPageToken == "" {
- return err
- }
- req.PageToken = nextPageToken
- }
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
-
- return notificationsToMapFromProto(notifications), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateNotificationConfigRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- NotificationConfig: toProtoNotification(n),
- }
- var pbn *storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- var err error
- pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toNotificationFromProto(pbn), err
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteNotificationConfigRequest{Name: id}
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteNotificationConfig(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
// setUserProjectMetadata appends a project ID to the outgoing Context metadata
@@ -1512,8 +1366,8 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context
}
type readStreamResponse struct {
- stream storagepb.Storage_ReadObjectClient
- response *storagepb.ReadObjectResponse
+ stream storagepb.Storage_ReadObjectClient
+ decoder *readResponseDecoder
}
type gRPCReader struct {
@@ -1522,7 +1376,7 @@ type gRPCReader struct {
stream storagepb.Storage_ReadObjectClient
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
- databuf []byte
+ currMsg *readResponseDecoder // decoder for the current message
cancel context.CancelFunc
settings *settings
checkCRC bool // should we check the CRC?
@@ -1565,18 +1419,21 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
}
var n int
- // Read leftovers and return what was available to conform to the Reader
+
+ // If there is data remaining in the current message, return what was
+ // available to conform to the Reader
// interface: https://pkg.go.dev/io#Reader.
- if len(r.leftovers) > 0 {
- n = copy(p, r.leftovers)
+ if !r.currMsg.done {
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(n)
- r.updateCRC(p[:n])
- r.leftovers = r.leftovers[n:]
return n, nil
}
// Attempt to Recv the next message on the stream.
- content, err := r.recv()
+ // This will update r.currMsg with the decoder for the new message.
+ err := r.recv()
if err != nil {
return 0, err
}
@@ -1588,16 +1445,11 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- n = copy(p[n:], content)
- leftover := len(content) - n
- if leftover > 0 {
- // Wasn't able to copy all of the data in the message, store for
- // future Read calls.
- r.leftovers = content[n:]
- }
- r.seen += int64(n)
- r.updateCRC(p[:n])
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
return n, nil
}
@@ -1624,14 +1476,14 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Track bytes written during before call.
var alreadySeen = r.seen
- // Write any leftovers to the stream. There will be some leftovers from the
+ // Write any already received message to the stream. There will be some leftovers from the
// original NewRangeReader call.
- if len(r.leftovers) > 0 {
- // Write() will write the entire leftovers slice unless there is an error.
- written, err := w.Write(r.leftovers)
+ if r.currMsg != nil && !r.currMsg.done {
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(r.leftovers)
- r.leftovers = nil
+ r.currMsg = nil
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1642,7 +1494,7 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Attempt to receive the next message on the stream.
// Will terminate with io.EOF once data has all come through.
// recv() handles stream reopening and retry logic so no need for retries here.
- msg, err := r.recv()
+ err := r.recv()
if err != nil {
if err == io.EOF {
// We are done; check the checksum if necessary and return.
@@ -1658,9 +1510,10 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- written, err := w.Write(msg)
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(msg)
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1669,12 +1522,13 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
}
// Close cancels the read stream's context in order for it to be closed and
-// collected.
+// collected, and frees any currently in use buffers.
func (r *gRPCReader) Close() error {
if r.cancel != nil {
r.cancel()
}
r.stream = nil
+ r.currMsg = nil
return nil
}
@@ -1689,8 +1543,9 @@ func (r *gRPCReader) Close() error {
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
-func (r *gRPCReader) recv() ([]byte, error) {
- err := r.stream.RecvMsg(&r.databuf)
+func (r *gRPCReader) recv() error {
+ databufs := mem.BufferSlice{}
+ err := r.stream.RecvMsg(&databufs)
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
@@ -1700,16 +1555,16 @@ func (r *gRPCReader) recv() ([]byte, error) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
- // successful, the next logical chunk will be returned.
- msg, err := r.reopenStream()
- return msg.GetChecksummedData().GetContent(), err
+ // successful, r.currMsg will be updated to include the new data.
+ return r.reopenStream()
}
if err != nil {
- return nil, err
+ return err
}
- return readObjectResponseContent(r.databuf)
+ r.currMsg = &readResponseDecoder{databufs: databufs}
+ return r.currMsg.readFullObjectResponse()
}
// ReadObjectResponse field and subfield numbers.
@@ -1722,21 +1577,297 @@ const (
metadataField = protowire.Number(4)
)
-// readObjectResponseContent returns the checksummed_data.content field of a
-// ReadObjectResponse message, or an error if the message is invalid.
-// This can be used on recvs of objects after the first recv, since only the
-// first message will contain non-data fields.
-func readObjectResponseContent(b []byte) ([]byte, error) {
- checksummedData, err := readProtoBytes(b, checksummedDataField)
+// readResponseDecoder is a wrapper on the raw message, used to decode one message
+// without copying object data. It also has methods to write out the resulting object
+// data to the user application.
+type readResponseDecoder struct {
+ databufs mem.BufferSlice // raw bytes of the message being processed
+ // Decoding offsets
+ off uint64 // offset in the message relative to the data as a whole
+ currBuf int // index of the current buffer being processed
+ currOff uint64 // offset in the current buffer
+ // Processed data
+ msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated
+ dataOffsets bufferSliceOffsets // offsets of the object data in the message.
+ done bool // true if the data has been completely read.
+}
+
+type bufferSliceOffsets struct {
+ startBuf, endBuf int // indices of start and end buffers of object data in the msg
+ startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
+ currBuf int // index of current buffer being read out to the user application.
+ currOff uint64 // offset of read in current buffer.
+}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if the distance is split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
+// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the
+// buffers overall. Does not advance offsets.
+func (d *readResponseDecoder) copyNextBytes(n int) []byte {
+ remaining := n
+ if r := d.databufs.Len() - int(d.off); r < remaining {
+ remaining = r
+ }
+ currBuf := d.currBuf
+ currOff := d.currOff
+ var buf []byte
+ for remaining > 0 {
+ b := d.databufs[currBuf].ReadOnlyData()
+ remainingInCurr := len(b[currOff:])
+ if remainingInCurr < remaining {
+ buf = append(buf, b[currOff:]...)
+ remaining -= remainingInCurr
+ currBuf++
+ currOff = 0
+ } else {
+ buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
+ remaining = 0
+ }
+ }
+ return buf
+}
+
+// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
+// go past the end of the data.
+func (d *readResponseDecoder) advanceOffset(n uint64) error {
+ remaining := n
+ for remaining > 0 {
+ remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
+ if remainingInCurr <= remaining {
+ remaining -= remainingInCurr
+ d.currBuf++
+ d.currOff = 0
+ } else {
+ d.currOff += remaining
+ remaining = 0
+ }
+ }
+ // If we have advanced past the end of the buffers, something went wrong.
+ if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
+ return errors.New("decoding: truncated message, cannot advance offset")
+ }
+ d.off += n
+ return nil
+
+}
+
+// This copies object data from the message into the buffer and returns the number of
+// bytes copied. The data offsets are incremented in the message. The updateCRC
+// function is called on the copied bytes.
+func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0
+ }
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ n := copy(p, b)
+ updateCRC(b[:n])
+ d.dataOffsets.currOff += uint64(n)
+
+ // We've read all the data from this message. Free the underlying buffers.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
+ d.done = true
+ d.databufs.Free()
+ }
+ // We are at the end of the current buffer
+ if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
+ d.dataOffsets.currOff = 0
+ d.dataOffsets.currBuf++
+ }
+ return n
+}
+
+func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0, nil
+ }
+ var written int64
+ for !d.done {
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ var n int
+ // Write all remaining data from the current buffer
+ n, err := w.Write(b)
+ written += int64(n)
+ updateCRC(b)
+ if err != nil {
+ return written, err
+ }
+ d.dataOffsets.currOff = 0
+ // We've read all the data from this message.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ d.done = true
+ d.databufs.Free()
+ } else {
+ d.dataOffsets.currBuf++
+ }
+ }
+ return written, nil
+}
+
+// Consume the next available tag in the input data and return the field number and type.
+// Advances the relevant offsets in the data.
+func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
+ if tagLength < 0 {
+ return 0, 0, protowire.ParseError(tagLength)
+ }
+ // Update the offsets and current buffer depending on the tag length.
+ if err := d.advanceOffset(uint64(tagLength)); err != nil {
+ return 0, 0, fmt.Errorf("consuming tag: %w", err)
+ }
+ return fieldNum, fieldType, nil
+}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+ // Consume the next varint. This yields the length value and how
+ // much space the varint itself takes up in the buffer.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+ // Consume the next fixed32 value. This yields the value and how
+ // much space it takes up in the buffer.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+ // Consume the next fixed64 value. This yields the value and how
+ // much space it takes up in the buffer.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume any field values up to the end offset provided and don't return anything.
+// This is used to skip any values which are not going to be used.
+ // The consumed value is discarded; offsets are simply advanced past it.
+func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+ // reimplement protowire.ConsumeFieldValue without the extra case for groups (which
+ // are complicated and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
if err != nil {
- return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err)
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
}
- content, err := readProtoBytes(checksummedData, checksummedDataContentField)
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
if err != nil {
- return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err)
+ return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err)
+ }
+ offsets := bufferSliceOffsets{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
}
- return content, nil
+ // Advance offsets to lengths of bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
+
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
}
// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
@@ -1746,21 +1877,17 @@ func readObjectResponseContent(b []byte) ([]byte, error) {
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
-func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
+func (d *readResponseDecoder) readFullObjectResponse() error {
msg := &storagepb.ReadObjectResponse{}
// Loop over the entire message, extracting fields as we go. This does not
// handle field concatenation, in which the contents of a single field
// are split across multiple protobuf tags.
- off := 0
- for off < len(b) {
- // Consume the next tag. This will tell us which field is next in the
- // buffer, its type, and how much space it takes up.
- fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:])
- if fieldLength < 0 {
- return nil, protowire.ParseError(fieldLength)
+ for d.off < uint64(d.databufs.Len()) {
+ fieldNum, fieldType, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming next tag: %w", err)
}
- off += fieldLength
// Unmarshal the field according to its type. Only fields that are not
// nil will be present.
@@ -1769,142 +1896,95 @@ func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
// The ChecksummedData field was found. Initialize the struct.
msg.ChecksummedData = &storagepb.ChecksummedData{}
- // Get the bytes corresponding to the checksummed data.
- fieldContent, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n))
+ bytesFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
}
- off += n
-
- // Get the nested fields. We need to do this manually as it contains
- // the object content bytes.
- contentOff := 0
- for contentOff < len(fieldContent) {
- gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+
+ var contentEndOff = d.off + bytesFieldLen
+ for d.off < contentEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming checksummedData tag: %w", err)
}
- contentOff += n
switch {
case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
- // Get the content bytes.
- bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n))
+ // Get the offsets of the content bytes.
+ d.dataOffsets, err = d.consumeBytes()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err)
}
- msg.ChecksummedData.Content = bytes
- contentOff += n
case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n))
+ v, err := d.consumeFixed32()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err)
}
msg.ChecksummedData.Crc32C = &v
- contentOff += n
default:
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ err := d.consumeFieldValue(gotNum, gotTyp)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err)
}
- contentOff += n
}
}
case fieldNum == objectChecksumsField && fieldType == protowire.BytesType:
// The field was found. Initialize the struct.
msg.ObjectChecksums = &storagepb.ObjectChecksums{}
-
- // Get the bytes corresponding to the checksums.
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n))
+ // Consume the bytes and copy them into a single buffer if they are split across buffers.
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err)
}
- off += n
-
// Unmarshal.
- if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil {
+ return err
}
case fieldNum == contentRangeField && fieldType == protowire.BytesType:
msg.ContentRange = &storagepb.ContentRange{}
-
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n))
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err)
}
- off += n
-
- if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.ContentRange); err != nil {
+ return err
}
case fieldNum == metadataField && fieldType == protowire.BytesType:
msg.Metadata = &storagepb.Object{}
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n))
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err)
}
- off += n
- if err := proto.Unmarshal(bytes, msg.Metadata); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
+ return err
}
default:
- fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:])
- if fieldLength < 0 {
- return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength))
- }
- off += fieldLength
- }
- }
-
- return msg, nil
-}
-
-// readProtoBytes returns the contents of the protobuf field with number num
-// and type bytes from a wire-encoded message. If the field cannot be found,
-// the returned slice will be nil and no error will be returned.
-//
-// It does not handle field concatenation, in which the contents of a single field
-// are split across multiple protobuf tags. Encoded data containing split fields
-// of this form is technically permissable, but uncommon.
-func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) {
- off := 0
- for off < len(b) {
- gotNum, gotTyp, n := protowire.ConsumeTag(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
- if gotNum == num && gotTyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ err := d.consumeFieldValue(fieldNum, fieldType)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse: %w", err)
}
- return b, nil
}
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
}
- return nil, nil
+ d.msg = msg
+ return nil
}
// reopenStream "closes" the existing stream and attempts to reopen a stream and
// sets the Reader's stream and cancelStream properties in the process.
-func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) {
+func (r *gRPCReader) reopenStream() error {
// Close existing stream and initialize new stream with updated offset.
r.Close()
res, cancel, err := r.reopen(r.seen)
if err != nil {
- return nil, err
+ return err
}
r.stream = res.stream
+ r.currMsg = res.decoder
r.cancel = cancel
- return res.response, nil
+ return nil
}
func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter {
diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go
new file mode 100644
index 0000000000000..d3422733497f5
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_dp.go
@@ -0,0 +1,22 @@
+//go:build !disable_grpc_modules
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ _ "google.golang.org/grpc/balancer/rls"
+ _ "google.golang.org/grpc/xds/googledirectpath"
+)
diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go
new file mode 100644
index 0000000000000..460a9d0a2b8f9
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go
@@ -0,0 +1,275 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
+ "github.com/google/uuid"
+ "go.opentelemetry.io/contrib/detectors/gcp"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/stats/opentelemetry"
+)
+
+const (
+ monitoredResourceName = "storage.googleapis.com/Client"
+ metricPrefix = "storage.googleapis.com/client/"
+)
+
+func latencyHistogramBoundaries() []float64 {
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 0.002
+ // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range
+ for i := 0; i < 50; i++ {
+ boundaries = append(boundaries, boundary)
+ // increment by 2ms
+ boundary += increment
+ }
+ // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes
+ for i := 0; i < 150 && boundary < 300; i++ {
+ boundaries = append(boundaries, boundary)
+ if i != 0 && i%10 == 0 {
+ increment *= 2
+ }
+ boundary += increment
+ }
+ return boundaries
+}
+
+func sizeHistogramBoundaries() []float64 {
+ kb := 1024.0
+ mb := 1024.0 * kb
+ gb := 1024.0 * mb
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 128 * kb
+ // 128 KiB increments up to 4MiB, then exponential growth
+ for len(boundaries) < 200 && boundary <= 16*gb {
+ boundaries = append(boundaries, boundary)
+ boundary += increment
+ if boundary >= 4*mb {
+ increment *= 2
+ }
+ }
+ return boundaries
+}
+
+func metricFormatter(m metricdata.Metrics) string {
+ return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
+}
+
+func gcpAttributeExpectedDefaults() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ {Key: "location", Value: attribute.StringValue("global")},
+ {Key: "cloud_platform", Value: attribute.StringValue("unknown")},
+ {Key: "host_id", Value: attribute.StringValue("unknown")}}
+}
+
+// Added to help with tests
+type preparedResource struct {
+ projectToUse string
+ resource *resource.Resource
+}
+
+func newPreparedResource(ctx context.Context, project string, resourceOptions []resource.Option) (*preparedResource, error) {
+ detectedAttrs, err := resource.New(ctx, resourceOptions...)
+ if err != nil {
+ return nil, err
+ }
+ preparedResource := &preparedResource{}
+ s := detectedAttrs.Set()
+ p, present := s.Value("cloud.account.id")
+ if present {
+ preparedResource.projectToUse = p.AsString()
+ } else {
+ preparedResource.projectToUse = project
+ }
+ updates := []attribute.KeyValue{}
+ for _, kv := range gcpAttributeExpectedDefaults() {
+ if val, present := s.Value(kv.Key); !present || val.AsString() == "" {
+ updates = append(updates, attribute.KeyValue{Key: kv.Key, Value: kv.Value})
+ }
+ }
+ r, err := resource.New(
+ ctx,
+ resource.WithAttributes(
+ attribute.KeyValue{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)},
+ attribute.KeyValue{Key: "instance_id", Value: attribute.StringValue(uuid.New().String())},
+ attribute.KeyValue{Key: "project_id", Value: attribute.StringValue(project)},
+ attribute.KeyValue{Key: "api", Value: attribute.StringValue("grpc")},
+ ),
+ resource.WithAttributes(detectedAttrs.Attributes()...),
+ // Last duplicate key / value wins
+ resource.WithAttributes(updates...),
+ )
+ if err != nil {
+ return nil, err
+ }
+ preparedResource.resource = r
+ return preparedResource, nil
+}
+
+type metricsContext struct {
+ // project used by exporter
+ project string
+ // client options passed to gRPC channels
+ clientOpts []option.ClientOption
+ // instance of metric reader used by gRPC client-side metrics
+ provider *metric.MeterProvider
+ // clean func to call when closing gRPC client
+ close func()
+}
+
+func createHistogramView(name string, boundaries []float64) metric.View {
+ return metric.NewView(metric.Instrument{
+ Name: name,
+ Kind: metric.InstrumentKindHistogram,
+ }, metric.Stream{
+ Name: name,
+ Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries},
+ })
+}
+
+func newGRPCMetricContext(ctx context.Context, project string) (*metricsContext, error) {
+ preparedResource, err := newPreparedResource(ctx, project, []resource.Option{resource.WithDetectors(gcp.NewDetector())})
+ if err != nil {
+ return nil, err
+ }
+ // The implementation requires a project; if one cannot be determined (possibly
+ // from user credentials), fail stating that gRPC metrics require a project ID.
+ if project == "" && preparedResource.projectToUse != "" {
+ return nil, fmt.Errorf("google cloud project is required to start client-side metrics")
+ }
+ // If projectToUse isn't the same as the project provided to the Storage client, then
+ // emit a log stating which project is being used to emit metrics to.
+ if project != preparedResource.projectToUse {
+ log.Printf("The Project ID configured for metrics is %s, but the Project ID of the storage client is %s. Make sure that the service account in use has the required metric writing role (roles/monitoring.metricWriter) in the project projectIdToUse or metrics will not be written.", preparedResource.projectToUse, project)
+ }
+ meOpts := []mexporter.Option{
+ mexporter.WithProjectID(preparedResource.projectToUse),
+ mexporter.WithMetricDescriptorTypeFormatter(metricFormatter),
+ mexporter.WithCreateServiceTimeSeries(),
+ mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"})}
+ exporter, err := mexporter.New(meOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Metric views update histogram boundaries to be relevant to GCS
+ // otherwise default OTel histogram boundaries are used.
+ metricViews := []metric.View{
+ createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries()),
+ }
+ provider := metric.NewMeterProvider(
+ metric.WithReader(metric.NewPeriodicReader(&exporterLogSuppressor{exporter: exporter}, metric.WithInterval(time.Minute))),
+ metric.WithResource(preparedResource.resource),
+ metric.WithView(metricViews...),
+ )
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: opentelemetry.DefaultMetrics().Add(
+ "grpc.lb.wrr.rr_fallback",
+ "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ "grpc.lb.wrr.endpoint_weight_stale",
+ "grpc.lb.wrr.endpoint_weights",
+ "grpc.lb.rls.cache_entries",
+ "grpc.lb.rls.cache_size",
+ "grpc.lb.rls.default_target_picks",
+ "grpc.lb.rls.target_picks",
+ "grpc.lb.rls.failed_picks"),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ opts := []option.ClientOption{
+ option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})),
+ }
+ context := &metricsContext{
+ project: preparedResource.projectToUse,
+ clientOpts: opts,
+ provider: provider,
+ close: createShutdown(ctx, provider),
+ }
+ return context, nil
+}
+
+func enableClientMetrics(ctx context.Context, s *settings) (*metricsContext, error) {
+ var project string
+ c, err := transport.Creds(ctx, s.clientOption...)
+ if err == nil {
+ project = c.ProjectID
+ }
+ // Enable client-side metrics for gRPC
+ metricsContext, err := newGRPCMetricContext(ctx, project)
+ if err != nil {
+ return nil, fmt.Errorf("gRPC Metrics: %w", err)
+ }
+ return metricsContext, nil
+}
+
+func createShutdown(ctx context.Context, provider *metric.MeterProvider) func() {
+ return func() {
+ provider.Shutdown(ctx)
+ }
+}
+
+// Silences permission errors after initial error is emitted to prevent
+// chatty logs.
+type exporterLogSuppressor struct {
+ exporter metric.Exporter
+ emittedFailure bool
+}
+
+// Implements OTel SDK metric.Exporter interface to prevent noisy logs from
+// lack of credentials after initial failure.
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter
+func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure {
+ if strings.Contains(err.Error(), "PermissionDenied") {
+ e.emittedFailure = true
+ return fmt.Errorf("gRPC metrics failed due permission issue: %w", err)
+ }
+ return err
+ }
+ return nil
+}
+
+func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.exporter.Temporality(k)
+}
+
+func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.exporter.Aggregation(k)
+}
+
+func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error {
+ return e.exporter.ForceFlush(ctx)
+}
+
+func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error {
+ return e.exporter.Shutdown(ctx)
+}
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
index f7811a5d140f0..2387fd33c703f 100644
--- a/vendor/cloud.google.com/go/storage/hmac.go
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -20,7 +20,6 @@ import (
"fmt"
"time"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
@@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
//
// Options such as UserProjectForHMACKeys can be used to set the
// userProject to be billed against for operations.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
return hmKey, nil
}
-func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
- if pbmd == nil {
- return nil
- }
-
- return &HMACKey{
- AccessID: pbmd.GetAccessId(),
- ID: pbmd.GetId(),
- State: HMACState(pbmd.GetState()),
- ProjectID: pbmd.GetProject(),
- CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
- UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
- ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
- }
-}
-
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
+// Note: gRPC is not supported.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
@@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct {
}
// Update mutates the HMACKey referred to by accessID.
+// Note: gRPC is not supported.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
@@ -237,6 +224,7 @@ type HMACKeysIterator struct {
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+// Note: gRPC is not supported.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
desc := new(hmacKeyDesc)
for _, opt := range opts {
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index 0e213a6632a3e..82d9566b9f36d 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -857,14 +857,7 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
reopen := readerReopen(ctx, req.Header, params, s,
func(ctx context.Context) (*http.Response, error) {
- // Set custom headers passed in via the context. This is only required for XML;
- // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
- ctxHeaders := callctx.HeadersFromContext(ctx)
- for k, vals := range ctxHeaders {
- for _, v := range vals {
- req.Header.Set(k, v)
- }
- }
+ setHeadersFromCtx(ctx, req.Header)
return c.hc.Do(req.WithContext(ctx))
},
func() error { return setConditionsHeaders(req.Header, params.conds) },
@@ -1422,18 +1415,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
} else {
size = res.ContentLength
- // Check the CRC iff all of the following hold:
- // - We asked for content (length != 0).
- // - We got all the content (status != PartialContent).
- // - The server sent a CRC header.
- // - The Go http stack did not uncompress the file.
- // - We were not served compressed data that was uncompressed on download.
- // The problem with the last two cases is that the CRC will not match -- GCS
- // computes it on the compressed contents, but we compute it on the
- // uncompressed contents.
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
- crc, checkCRC = parseCRC32c(res)
- }
+ }
+
+ // Check the CRC iff all of the following hold:
+ // - We asked for content (length != 0).
+ // - We got all the content (status != PartialContent).
+ // - The server sent a CRC header.
+ // - The Go http stack did not uncompress the file.
+ // - We were not served compressed data that was uncompressed on download.
+ // The problem with the last two cases is that the CRC will not match -- GCS
+ // computes it on the compressed contents, but we compute it on the
+ // uncompressed contents.
+ crc, checkCRC = parseCRC32c(res)
+ if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) {
+ checkCRC = false
}
remain := res.ContentLength
@@ -1470,6 +1465,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
StartOffset: startOffset,
Generation: params.gen,
Metageneration: metaGen,
+ CRC32C: crc,
+ Decompressed: res.Uncompressed || uncompressedByServer(res),
}
return &Reader{
Attrs: attrs,
@@ -1484,3 +1481,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
},
}, nil
}
+
+// setHeadersFromCtx sets custom headers passed in via the context on the header,
+// replacing any header with the same key (which avoids duplicating invocation headers).
+// This is only required for XML; for gRPC & JSON requests this is handled in
+// the GAPIC and Apiary layers respectively.
+func setHeadersFromCtx(ctx context.Context, header http.Header) {
+ ctxHeaders := callctx.HeadersFromContext(ctx)
+ for k, vals := range ctxHeaders {
+ // Merge x-goog-api-client values into a single space-separated value.
+ if strings.EqualFold(k, xGoogHeaderKey) {
+ alreadySetValues := header.Values(xGoogHeaderKey)
+ vals = append(vals, alreadySetValues...)
+
+ if len(vals) > 0 {
+ xGoogHeader := vals[0]
+ for _, v := range vals[1:] {
+ xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+ }
+ header.Set(k, xGoogHeader)
+ }
+ } else {
+ for _, v := range vals {
+ header.Set(k, v)
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
new file mode 100644
index 0000000000000..f2822035c41b4
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
@@ -0,0 +1,50 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package storage
+
+import (
+ "iter"
+
+ storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *HmacKeyMetadataIterator) All() iter.Seq2[*storagepb.HmacKeyMetadata, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationConfigIterator) All() iter.Seq2[*storagepb.NotificationConfig, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
index 5e2a8f0ad5bec..869f3b1fbcd73 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
@@ -17,19 +17,15 @@
// Package storage is an auto-generated package for the
// Cloud Storage API.
//
-// Stop. This folder is likely not what you are looking for. This folder
-// contains protocol buffer definitions for an API only accessible to select
-// customers. Customers not participating should not depend on this file.
-// Please contact Google Cloud sales if you are interested. Unless told
-// otherwise by a Google Cloud representative, do not use or otherwise rely
-// on any of the contents of this folder. If you would like to use Cloud
-// Storage, please consult our official documentation (at
+// This folder contains protocol buffer definitions for an API only
+// accessible to select customers. Customers not participating should not
+// depend on this file. Please contact Google Cloud sales if you are
+// interested. Unless told otherwise by a Google Cloud representative, do not
+// use or otherwise rely on any of the contents of this folder. If you would
+// like to use Cloud Storage, please consult our official documentation (at
// https://cloud.google.com/storage/docs/apis) for details on our XML and
// JSON APIs, or else consider one of our client libraries (at
-// https://cloud.google.com/storage/docs/reference/libraries). This API
-// defined in this folder is unreleased and may shut off, break, or fail at
-// any time for any users who are not registered as a part of a private
-// preview program.
+// https://cloud.google.com/storage/docs/reference/libraries).
//
// # General documentation
//
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
index 82ec5db902b93..5611f1e9e7095 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
@@ -50,10 +50,6 @@ type CallOptions struct {
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
UpdateBucket []gax.CallOption
- DeleteNotificationConfig []gax.CallOption
- GetNotificationConfig []gax.CallOption
- CreateNotificationConfig []gax.CallOption
- ListNotificationConfigs []gax.CallOption
ComposeObject []gax.CallOption
DeleteObject []gax.CallOption
RestoreObject []gax.CallOption
@@ -73,6 +69,10 @@ type CallOptions struct {
GetHmacKey []gax.CallOption
ListHmacKeys []gax.CallOption
UpdateHmacKey []gax.CallOption
+ DeleteNotificationConfig []gax.CallOption
+ GetNotificationConfig []gax.CallOption
+ CreateNotificationConfig []gax.CallOption
+ ListNotificationConfigs []gax.CallOption
}
func defaultGRPCClientOptions() []option.ClientOption {
@@ -84,6 +84,7 @@ func defaultGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://storage.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -208,7 +209,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteNotificationConfig: []gax.CallOption{
+ ComposeObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -221,7 +222,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetNotificationConfig: []gax.CallOption{
+ DeleteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -234,7 +235,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CreateNotificationConfig: []gax.CallOption{
+ RestoreObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -247,7 +248,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListNotificationConfigs: []gax.CallOption{
+ CancelResumableWrite: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -260,7 +261,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ComposeObject: []gax.CallOption{
+ GetObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -273,8 +274,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ ReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -286,7 +286,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RestoreObject: []gax.CallOption{
+ UpdateObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -299,8 +299,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CancelResumableWrite: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ WriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -312,8 +311,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ BidiWriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -325,7 +323,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ReadObject: []gax.CallOption{
+ ListObjects: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -337,7 +336,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- UpdateObject: []gax.CallOption{
+ RewriteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -350,7 +349,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- WriteObject: []gax.CallOption{
+ StartResumableWrite: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -362,7 +362,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- BidiWriteObject: []gax.CallOption{
+ QueryWriteStatus: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -374,7 +375,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListObjects: []gax.CallOption{
+ GetServiceAccount: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -387,7 +388,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RewriteObject: []gax.CallOption{
+ CreateHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -400,7 +401,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- StartResumableWrite: []gax.CallOption{
+ DeleteHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -413,7 +414,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- QueryWriteStatus: []gax.CallOption{
+ GetHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -426,7 +427,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetServiceAccount: []gax.CallOption{
+ ListHmacKeys: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -439,7 +440,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CreateHmacKey: []gax.CallOption{
+ UpdateHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -452,7 +453,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteHmacKey: []gax.CallOption{
+ DeleteNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -465,7 +466,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetHmacKey: []gax.CallOption{
+ GetNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -478,7 +479,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListHmacKeys: []gax.CallOption{
+ CreateNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -491,7 +492,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- UpdateHmacKey: []gax.CallOption{
+ ListNotificationConfigs: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -521,10 +522,6 @@ type internalClient interface {
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error)
- DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
- GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
@@ -544,6 +541,10 @@ type internalClient interface {
GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator
UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
+ DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
+ GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
+ CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
+ ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
}
// Client is a client for interacting with Cloud Storage API.
@@ -641,11 +642,13 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques
return c.internalClient.SetIamPolicy(ctx, req, opts...)
}
-// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
-// any, are held by the caller.
+// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder
+// to see which, if any, are held by the caller.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket} for a bucket,
+// projects/_/buckets/{bucket}/objects/{object} for an object, or
+// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
+// for a managed folder.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
@@ -655,29 +658,6 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe
return c.internalClient.UpdateBucket(ctx, req, opts...)
}
-// DeleteNotificationConfig permanently deletes a NotificationConfig.
-func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
-}
-
-// GetNotificationConfig view a NotificationConfig.
-func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.GetNotificationConfig(ctx, req, opts...)
-}
-
-// CreateNotificationConfig creates a NotificationConfig for a given bucket.
-// These NotificationConfigs, when triggered, publish messages to the
-// specified Pub/Sub topics. See
-// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
-func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
-}
-
-// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
-func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
-}
-
// ComposeObject concatenates a list of existing objects into a new object in the same
// bucket.
func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
@@ -849,35 +829,78 @@ func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWrite
}
// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account.
+//
+// Deprecated: GetServiceAccount may be removed in a future version.
func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
return c.internalClient.GetServiceAccount(ctx, req, opts...)
}
// CreateHmacKey creates a new HMAC key for the given service account.
+//
+// Deprecated: CreateHmacKey may be removed in a future version.
func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
return c.internalClient.CreateHmacKey(ctx, req, opts...)
}
// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state.
+//
+// Deprecated: DeleteHmacKey may be removed in a future version.
func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteHmacKey(ctx, req, opts...)
}
// GetHmacKey gets an existing HMAC key metadata for the given id.
+//
+// Deprecated: GetHmacKey may be removed in a future version.
func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
return c.internalClient.GetHmacKey(ctx, req, opts...)
}
// ListHmacKeys lists HMAC keys under a given project with the additional filters provided.
+//
+// Deprecated: ListHmacKeys may be removed in a future version.
func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
return c.internalClient.ListHmacKeys(ctx, req, opts...)
}
// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE.
+//
+// Deprecated: UpdateHmacKey may be removed in a future version.
func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
return c.internalClient.UpdateHmacKey(ctx, req, opts...)
}
+// DeleteNotificationConfig permanently deletes a NotificationConfig.
+//
+// Deprecated: DeleteNotificationConfig may be removed in a future version.
+func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
+}
+
+// GetNotificationConfig view a NotificationConfig.
+//
+// Deprecated: GetNotificationConfig may be removed in a future version.
+func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ return c.internalClient.GetNotificationConfig(ctx, req, opts...)
+}
+
+// CreateNotificationConfig creates a NotificationConfig for a given bucket.
+// These NotificationConfigs, when triggered, publish messages to the
+// specified Pub/Sub topics. See
+// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
+//
+// Deprecated: CreateNotificationConfig may be removed in a future version.
+func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
+}
+
+// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
+//
+// Deprecated: ListNotificationConfigs may be removed in a future version.
+func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
+ return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
+}
+
// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -1198,6 +1221,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
+ if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1246,138 +1272,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
return resp, nil
}
-func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
- it := &NotificationConfigIterator{}
- req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
- resp := &storagepb.ListNotificationConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
@@ -1917,3 +1811,135 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma
}
return resp, nil
}
+
+func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
+ var resp *storagepb.NotificationConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
+ var resp *storagepb.NotificationConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
+ it := &NotificationConfigIterator{}
+ req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
+ resp := &storagepb.ListNotificationConfigsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
index aeb7512f4ab3a..5c0e784517a77 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
@@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber {
// Deprecated: Use ServiceConstants_Values.Descriptor instead.
func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0}
}
// Request message for DeleteBucket.
@@ -743,18 +743,41 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
-// Request message for DeleteNotificationConfig.
-type DeleteNotificationConfigRequest struct {
+// Request message for ComposeObject.
+type ComposeObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The parent bucket of the NotificationConfig.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. Properties of the resulting object.
+ Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
+ // The list of source objects that will be concatenated into a single object.
+ SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"`
+ // Apply a predefined set of access controls to the destination object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Resource name of the Cloud KMS key, of the form
+ // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+ // that will be used to encrypt the object. Overrides the object
+ // metadata's `kms_key_name` value, if any.
+ KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The checksums of the complete object. This will be validated against the
+ // combined checksums of the component objects.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *DeleteNotificationConfigRequest) Reset() {
- *x = DeleteNotificationConfigRequest{}
+func (x *ComposeObjectRequest) Reset() {
+ *x = ComposeObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -762,13 +785,13 @@ func (x *DeleteNotificationConfigRequest) Reset() {
}
}
-func (x *DeleteNotificationConfigRequest) String() string {
+func (x *ComposeObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*DeleteNotificationConfigRequest) ProtoMessage() {}
+func (*ComposeObjectRequest) ProtoMessage() {}
-func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -780,32 +803,104 @@ func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
}
-func (x *DeleteNotificationConfigRequest) GetName() string {
+func (x *ComposeObjectRequest) GetDestination() *Object {
if x != nil {
- return x.Name
+ return x.Destination
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject {
+ if x != nil {
+ return x.SourceObjects
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string {
+ if x != nil {
+ return x.DestinationPredefinedAcl
}
return ""
}
-// Request message for GetNotificationConfig.
-type GetNotificationConfigRequest struct {
+func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *ComposeObjectRequest) GetKmsKey() string {
+ if x != nil {
+ return x.KmsKey
+ }
+ return ""
+}
+
+func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+// Message for deleting an object.
+// `bucket` and `object` **must** be set.
+type DeleteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The parent bucket of the NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the finalized object to delete.
+ // Note: If you want to delete an unfinalized resumable upload please use
+ // `CancelResumableWrite`.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, permanently deletes a specific revision of this object (as
+ // opposed to the latest version, the default).
+ Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *GetNotificationConfigRequest) Reset() {
- *x = GetNotificationConfigRequest{}
+func (x *DeleteObjectRequest) Reset() {
+ *x = DeleteObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -813,13 +908,13 @@ func (x *GetNotificationConfigRequest) Reset() {
}
}
-func (x *GetNotificationConfigRequest) String() string {
+func (x *DeleteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetNotificationConfigRequest) ProtoMessage() {}
+func (*DeleteObjectRequest) ProtoMessage() {}
-func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -831,32 +926,105 @@ func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
+func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
}
-func (x *GetNotificationConfigRequest) GetName() string {
+func (x *DeleteObjectRequest) GetBucket() string {
if x != nil {
- return x.Name
+ return x.Bucket
}
return ""
}
-// Request message for CreateNotificationConfig.
-type CreateNotificationConfigRequest struct {
+func (x *DeleteObjectRequest) GetObject() string {
+ if x != nil {
+ return x.Object
+ }
+ return ""
+}
+
+func (x *DeleteObjectRequest) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+// Message for restoring an object.
+// `bucket`, `object`, and `generation` **must** be set.
+type RestoreObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The bucket to which this NotificationConfig belongs.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. Properties of the NotificationConfig to be inserted.
- NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to restore.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // Required. The specific revision of the object to restore.
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // If false or unset, the bucket's default object ACL will be used.
+ // If true, copy the source object's access controls.
+ // Return an error if bucket has UBLA enabled.
+ CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *CreateNotificationConfigRequest) Reset() {
- *x = CreateNotificationConfigRequest{}
+func (x *RestoreObjectRequest) Reset() {
+ *x = RestoreObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -864,13 +1032,13 @@ func (x *CreateNotificationConfigRequest) Reset() {
}
}
-func (x *CreateNotificationConfigRequest) String() string {
+func (x *RestoreObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateNotificationConfigRequest) ProtoMessage() {}
+func (*RestoreObjectRequest) ProtoMessage() {}
-func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -882,47 +1050,88 @@ func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
+func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
}
-func (x *CreateNotificationConfigRequest) GetParent() string {
+func (x *RestoreObjectRequest) GetBucket() string {
if x != nil {
- return x.Parent
+ return x.Bucket
}
return ""
}
-func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
+func (x *RestoreObjectRequest) GetObject() string {
if x != nil {
- return x.NotificationConfig
+ return x.Object
+ }
+ return ""
+}
+
+func (x *RestoreObjectRequest) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetCopySourceAcl() bool {
+ if x != nil && x.CopySourceAcl != nil {
+ return *x.CopySourceAcl
+ }
+ return false
+}
+
+func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
return nil
}
-// Request message for ListNotifications.
-type ListNotificationConfigsRequest struct {
+// Message for canceling an in-progress resumable upload.
+// `upload_id` **must** be set.
+type CancelResumableWriteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of a Google Cloud Storage bucket.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // The maximum number of NotificationConfigs to return. The service may
- // return fewer than this value. The default value is 100. Specifying a value
- // above 100 will result in a page_size of 100.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A page token, received from a previous `ListNotificationConfigs` call.
- // Provide this to retrieve the subsequent page.
- //
- // When paginating, all other parameters provided to `ListNotificationConfigs`
- // must match the call that provided the page token.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Required. The upload_id of the resumable upload to cancel. This should be
+ // copied from the `upload_id` field of `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
}
-func (x *ListNotificationConfigsRequest) Reset() {
- *x = ListNotificationConfigsRequest{}
+func (x *CancelResumableWriteRequest) Reset() {
+ *x = CancelResumableWriteRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -930,13 +1139,13 @@ func (x *ListNotificationConfigsRequest) Reset() {
}
}
-func (x *ListNotificationConfigsRequest) String() string {
+func (x *CancelResumableWriteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListNotificationConfigsRequest) ProtoMessage() {}
+func (*CancelResumableWriteRequest) ProtoMessage() {}
-func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
+func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -948,47 +1157,28 @@ func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
+func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
}
-func (x *ListNotificationConfigsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListNotificationConfigsRequest) GetPageToken() string {
+func (x *CancelResumableWriteRequest) GetUploadId() string {
if x != nil {
- return x.PageToken
+ return x.UploadId
}
return ""
}
-// The result of a call to ListNotificationConfigs
-type ListNotificationConfigsResponse struct {
+// Empty response message for canceling an in-progress resumable upload, will be
+// extended as needed.
+type CancelResumableWriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
-
- // The list of items.
- NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
- // A token, which can be sent as `page_token` to retrieve the next page.
- // If this field is omitted, there are no subsequent pages.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
-func (x *ListNotificationConfigsResponse) Reset() {
- *x = ListNotificationConfigsResponse{}
+func (x *CancelResumableWriteResponse) Reset() {
+ *x = CancelResumableWriteResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -996,13 +1186,13 @@ func (x *ListNotificationConfigsResponse) Reset() {
}
}
-func (x *ListNotificationConfigsResponse) String() string {
+func (x *CancelResumableWriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListNotificationConfigsResponse) ProtoMessage() {}
+func (*CancelResumableWriteResponse) ProtoMessage() {}
-func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
+func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1014,60 +1204,69 @@ func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
+func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
}
-func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
- if x != nil {
- return x.NotificationConfigs
- }
- return nil
-}
-
-func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// Request message for ComposeObject.
-type ComposeObjectRequest struct {
+// Request message for ReadObject.
+type ReadObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Properties of the resulting object.
- Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
- // The list of source objects that will be concatenated into a single object.
- SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"`
- // Apply a predefined set of access controls to the destination object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
+ // Required. The name of the bucket containing the object to read.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to read.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, selects a specific revision of this object (as opposed
+ // to the latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // The offset for the first byte to return in the read, relative to the start
+ // of the object.
+ //
+ // A negative `read_offset` value will be interpreted as the number of bytes
+ // back from the end of the object to be returned. For example, if an object's
+ // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and
+ // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting
+ // a negative offset with magnitude larger than the size of the object will
+ // return the entire object.
+ ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
+ // The maximum number of `data` bytes the server is allowed to return in the
+ // sum of all `Object` messages. A `read_limit` of zero indicates that there
+ // is no limit, and a negative `read_limit` will cause an error.
+ //
+ // If the stream returns fewer bytes than allowed by the `read_limit` and no
+ // error occurred, the stream includes all data from the `read_offset` to the
+ // end of the resource.
+ ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Resource name of the Cloud KMS key, of the form
- // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
- // that will be used to encrypt the object. Overrides the object
- // metadata's `kms_key_name` value, if any.
- KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // The checksums of the complete object. This will be validated against the
- // combined checksums of the component objects.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // The checksummed_data field and its children will always be present.
+ // If no mask is specified, will default to all fields except metadata.owner
+ // and metadata.acl.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
-func (x *ComposeObjectRequest) Reset() {
- *x = ComposeObjectRequest{}
+func (x *ReadObjectRequest) Reset() {
+ *x = ReadObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1075,13 +1274,13 @@ func (x *ComposeObjectRequest) Reset() {
}
}
-func (x *ComposeObjectRequest) String() string {
+func (x *ReadObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest) ProtoMessage() {}
+func (*ReadObjectRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1093,104 +1292,129 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
+func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
}
-func (x *ComposeObjectRequest) GetDestination() *Object {
+func (x *ReadObjectRequest) GetBucket() string {
if x != nil {
- return x.Destination
+ return x.Bucket
}
- return nil
+ return ""
}
-func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject {
+func (x *ReadObjectRequest) GetObject() string {
if x != nil {
- return x.SourceObjects
+ return x.Object
}
- return nil
+ return ""
}
-func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string {
+func (x *ReadObjectRequest) GetGeneration() int64 {
if x != nil {
- return x.DestinationPredefinedAcl
+ return x.Generation
}
- return ""
+ return 0
}
-func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *ReadObjectRequest) GetReadOffset() int64 {
+ if x != nil {
+ return x.ReadOffset
}
return 0
}
-func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
+func (x *ReadObjectRequest) GetReadLimit() int64 {
+ if x != nil {
+ return x.ReadLimit
}
return 0
}
-func (x *ComposeObjectRequest) GetKmsKey() string {
- if x != nil {
- return x.KmsKey
+func (x *ReadObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return ""
+ return 0
}
-func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
+func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.ObjectChecksums
+ return x.ReadMask
}
return nil
}
-// Message for deleting an object.
-// `bucket` and `object` **must** be set.
-type DeleteObjectRequest struct {
+// Request message for GetObject.
+type GetObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Name of the bucket in which the object resides.
Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the finalized object to delete.
- // Note: If you want to delete an unfinalized resumable upload please use
- // `CancelResumableWrite`.
+ // Required. Name of the object.
Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, permanently deletes a specific revision of this object (as
- // opposed to the latest version, the default).
- Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"`
+ // If present, selects a specific revision of this object (as opposed to the
+ // latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // If true, return the soft-deleted version of this object.
+ SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
// Makes the operation conditional on whether the object's live generation
// does not match the given value. If no live object exists, the precondition
// fails. Setting to 0 makes the operation succeed only if there is a live
// version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // If no mask is specified, will default to all fields except metadata.acl and
+ // metadata.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
-func (x *DeleteObjectRequest) Reset() {
- *x = DeleteObjectRequest{}
+func (x *GetObjectRequest) Reset() {
+ *x = GetObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1198,13 +1422,13 @@ func (x *DeleteObjectRequest) Reset() {
}
}
-func (x *DeleteObjectRequest) String() string {
+func (x *GetObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*DeleteObjectRequest) ProtoMessage() {}
+func (*GetObjectRequest) ProtoMessage() {}
-func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1216,105 +1440,107 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
-func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
+func (*GetObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
}
-func (x *DeleteObjectRequest) GetBucket() string {
+func (x *GetObjectRequest) GetBucket() string {
if x != nil {
return x.Bucket
}
return ""
}
-func (x *DeleteObjectRequest) GetObject() string {
+func (x *GetObjectRequest) GetObject() string {
if x != nil {
return x.Object
}
return ""
}
-func (x *DeleteObjectRequest) GetGeneration() int64 {
+func (x *GetObjectRequest) GetGeneration() int64 {
if x != nil {
return x.Generation
}
return 0
}
-func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 {
+func (x *GetObjectRequest) GetSoftDeleted() bool {
+ if x != nil && x.SoftDeleted != nil {
+ return *x.SoftDeleted
+ }
+ return false
+}
+
+func (x *GetObjectRequest) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 {
+func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 {
if x != nil && x.IfGenerationNotMatch != nil {
return *x.IfGenerationNotMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 {
+func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 {
if x != nil && x.IfMetagenerationMatch != nil {
return *x.IfMetagenerationMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 {
if x != nil && x.IfMetagenerationNotMatch != nil {
return *x.IfMetagenerationNotMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-// Message for restoring an object.
-// `bucket`, `object`, and `generation` **must** be set.
-type RestoreObjectRequest struct {
+func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.ReadMask
+ }
+ return nil
+}
+
+// Response message for ReadObject.
+type ReadObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which the object resides.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the object to restore.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // Required. The specific revision of the object to restore.
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // If false or unset, the bucket's default object ACL will be used.
- // If true, copy the source object's access controls.
- // Return an error if bucket has UBLA enabled.
- CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // A portion of the data for the object. The service **may** leave `data`
+ // empty for any given `ReadResponse`. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
+ // The checksums of the complete object. If the object is downloaded in full,
+ // the client should compute one of these checksums over the downloaded object
+ // and compare it against the value provided here.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // If read_offset and or read_limit was specified on the
+ // ReadObjectRequest, ContentRange will be populated on the first
+ // ReadObjectResponse message of the read stream.
+ ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"`
+ // Metadata of the object whose media is being returned.
+ // Only populated in the first response in the stream.
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
}
-func (x *RestoreObjectRequest) Reset() {
- *x = RestoreObjectRequest{}
+func (x *ReadObjectResponse) Reset() {
+ *x = ReadObjectResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1322,13 +1548,13 @@ func (x *RestoreObjectRequest) Reset() {
}
}
-func (x *RestoreObjectRequest) String() string {
+func (x *ReadObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*RestoreObjectRequest) ProtoMessage() {}
+func (*ReadObjectResponse) ProtoMessage() {}
-func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1340,102 +1566,92 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
-func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
+func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
}
-func (x *RestoreObjectRequest) GetBucket() string {
+func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
if x != nil {
- return x.Bucket
+ return x.ChecksummedData
}
- return ""
+ return nil
}
-func (x *RestoreObjectRequest) GetObject() string {
+func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums {
if x != nil {
- return x.Object
+ return x.ObjectChecksums
}
- return ""
+ return nil
}
-func (x *RestoreObjectRequest) GetGeneration() int64 {
+func (x *ReadObjectResponse) GetContentRange() *ContentRange {
if x != nil {
- return x.Generation
+ return x.ContentRange
}
- return 0
+ return nil
}
-func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *ReadObjectResponse) GetMetadata() *Object {
+ if x != nil {
+ return x.Metadata
}
- return 0
+ return nil
}
-func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
+// Describes an attempt to insert an object, possibly over multiple requests.
+type WriteObjectSpec struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
+ // Required. Destination object, including its name and its metadata.
+ Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // generation matches the given value. Setting to 0 makes the operation
+ // succeed only if there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live
+ // generation does not match the given value. If no live object exists, the
+ // precondition fails. Setting to 0 makes the operation succeed only if
+ // there is a live version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // The expected final object size being uploaded.
+ // If this value is set, closing the stream after writing fewer or more than
+ // `object_size` bytes will result in an OUT_OF_RANGE error.
+ //
+ // This situation is considered a client error, and if such an error occurs
+ // you must start the upload over from scratch, this time sending the correct
+ // number of bytes.
+ ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
}
-func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
+func (x *WriteObjectSpec) Reset() {
+ *x = WriteObjectSpec{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (x *RestoreObjectRequest) GetCopySourceAcl() bool {
- if x != nil && x.CopySourceAcl != nil {
- return *x.CopySourceAcl
- }
- return false
+func (x *WriteObjectSpec) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
-
-// Message for canceling an in-progress resumable upload.
-// `upload_id` **must** be set.
-type CancelResumableWriteRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The upload_id of the resumable upload to cancel. This should be
- // copied from the `upload_id` field of `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
-}
-
-func (x *CancelResumableWriteRequest) Reset() {
- *x = CancelResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CancelResumableWriteRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CancelResumableWriteRequest) ProtoMessage() {}
+func (*WriteObjectSpec) ProtoMessage() {}
-func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
+func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1447,131 +1663,126 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
-func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
+func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
}
-func (x *CancelResumableWriteRequest) GetUploadId() string {
+func (x *WriteObjectSpec) GetResource() *Object {
if x != nil {
- return x.UploadId
+ return x.Resource
}
- return ""
+ return nil
}
-// Empty response message for canceling an in-progress resumable upload, will be
-// extended as needed.
-type CancelResumableWriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+func (x *WriteObjectSpec) GetPredefinedAcl() string {
+ if x != nil {
+ return x.PredefinedAcl
+ }
+ return ""
}
-func (x *CancelResumableWriteResponse) Reset() {
- *x = CancelResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
+ return 0
}
-func (x *CancelResumableWriteResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (*CancelResumableWriteResponse) ProtoMessage() {}
+func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
-func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
-func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
+func (x *WriteObjectSpec) GetObjectSize() int64 {
+ if x != nil && x.ObjectSize != nil {
+ return *x.ObjectSize
+ }
+ return 0
}
-// Request message for ReadObject.
-type ReadObjectRequest struct {
+// Request message for WriteObject.
+type WriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The name of the bucket containing the object to read.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the object to read.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, selects a specific revision of this object (as opposed
- // to the latest version, the default).
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // The offset for the first byte to return in the read, relative to the start
- // of the object.
+ // The first message of each stream should set one of the following.
//
- // A negative `read_offset` value will be interpreted as the number of bytes
- // back from the end of the object to be returned. For example, if an object's
- // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and
- // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting
- // a negative offset with magnitude larger than the size of the object will
- // return the entire object.
- ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
- // The maximum number of `data` bytes the server is allowed to return in the
- // sum of all `Object` messages. A `read_limit` of zero indicates that there
- // is no limit, and a negative `read_limit` will cause an error.
+ // Types that are assignable to FirstMessage:
//
- // If the stream returns fewer bytes than allowed by the `read_limit` and no
- // error occurred, the stream includes all data from the `read_offset` to the
- // end of the resource.
- ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // *WriteObjectRequest_UploadId
+ // *WriteObjectRequest_WriteObjectSpec
+ FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An incorrect value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *WriteObjectRequest_ChecksummedData
+ Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+ // don't match the specified checksums the call will fail. May only be
+ // provided in the first or last request (either with first_message, or
+ // finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // Mask specifying which fields to read.
- // The checksummed_data field and its children will always be present.
- // If no mask is specified, will default to all fields except metadata.owner
- // and metadata.acl.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *ReadObjectRequest) Reset() {
- *x = ReadObjectRequest{}
+func (x *WriteObjectRequest) Reset() {
+ *x = WriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ReadObjectRequest) String() string {
+func (x *WriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ReadObjectRequest) ProtoMessage() {}
+func (*WriteObjectRequest) ProtoMessage() {}
-func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1582,270 +1793,138 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
-func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
+// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
}
-func (x *ReadObjectRequest) GetBucket() string {
- if x != nil {
- return x.Bucket
+func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
}
- return ""
+ return nil
}
-func (x *ReadObjectRequest) GetObject() string {
- if x != nil {
- return x.Object
+func (x *WriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
+ return x.UploadId
}
return ""
}
-func (x *ReadObjectRequest) GetGeneration() int64 {
- if x != nil {
- return x.Generation
+func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetReadOffset() int64 {
+func (x *WriteObjectRequest) GetWriteOffset() int64 {
if x != nil {
- return x.ReadOffset
+ return x.WriteOffset
}
return 0
}
-func (x *ReadObjectRequest) GetReadLimit() int64 {
- if x != nil {
- return x.ReadLimit
+func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
+func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
-}
-
-func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
-}
-
-func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *WriteObjectRequest) GetFinishWrite() bool {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.FinishWrite
}
- return nil
+ return false
}
-func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
+func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.ReadMask
+ return x.CommonObjectRequestParams
}
return nil
}
-// Request message for GetObject.
-type GetObjectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the bucket in which the object resides.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. Name of the object.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, selects a specific revision of this object (as opposed to the
- // latest version, the default).
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // If true, return the soft-deleted version of this object.
- SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // Mask specifying which fields to read.
- // If no mask is specified, will default to all fields except metadata.acl and
- // metadata.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
-}
-
-func (x *GetObjectRequest) Reset() {
- *x = GetObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetObjectRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetObjectRequest) ProtoMessage() {}
-
-func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
-func (*GetObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *GetObjectRequest) GetBucket() string {
- if x != nil {
- return x.Bucket
- }
- return ""
-}
-
-func (x *GetObjectRequest) GetObject() string {
- if x != nil {
- return x.Object
- }
- return ""
-}
-
-func (x *GetObjectRequest) GetGeneration() int64 {
- if x != nil {
- return x.Generation
- }
- return 0
+type isWriteObjectRequest_FirstMessage interface {
+ isWriteObjectRequest_FirstMessage()
}
-func (x *GetObjectRequest) GetSoftDeleted() bool {
- if x != nil && x.SoftDeleted != nil {
- return *x.SoftDeleted
- }
- return false
+type WriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+ // call to `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
}
-func (x *GetObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
+type WriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
}
-func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
+func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
-func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
-}
+func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
-func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
+type isWriteObjectRequest_Data interface {
+ isWriteObjectRequest_Data()
}
-func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
+type WriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
}
-func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.ReadMask
- }
- return nil
-}
+func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
-// Response message for ReadObject.
-type ReadObjectResponse struct {
+// Response message for WriteObject.
+type WriteObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // A portion of the data for the object. The service **may** leave `data`
- // empty for any given `ReadResponse`. This enables the service to inform the
- // client that the request is still live while it is running an operation to
- // generate more data.
- ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
- // The checksums of the complete object. If the object is downloaded in full,
- // the client should compute one of these checksums over the downloaded object
- // and compare it against the value provided here.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // If read_offset and or read_limit was specified on the
- // ReadObjectRequest, ContentRange will be populated on the first
- // ReadObjectResponse message of the read stream.
- ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"`
- // Metadata of the object whose media is being returned.
- // Only populated in the first response in the stream.
- Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *WriteObjectResponse_PersistedSize
+ // *WriteObjectResponse_Resource
+ WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *ReadObjectResponse) Reset() {
- *x = ReadObjectResponse{}
+func (x *WriteObjectResponse) Reset() {
+ *x = WriteObjectResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ReadObjectResponse) String() string {
+func (x *WriteObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ReadObjectResponse) ProtoMessage() {}
+func (*WriteObjectResponse) ProtoMessage() {}
-func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1856,159 +1935,54 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
-func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
+// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
}
-func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
- if x != nil {
- return x.ChecksummedData
+func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
}
return nil
}
-func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums {
- if x != nil {
- return x.ObjectChecksums
+func (x *WriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
}
- return nil
+ return 0
}
-func (x *ReadObjectResponse) GetContentRange() *ContentRange {
- if x != nil {
- return x.ContentRange
+func (x *WriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
+ return x.Resource
}
return nil
}
-func (x *ReadObjectResponse) GetMetadata() *Object {
- if x != nil {
- return x.Metadata
- }
- return nil
+type isWriteObjectResponse_WriteStatus interface {
+ isWriteObjectResponse_WriteStatus()
}
-// Describes an attempt to insert an object, possibly over multiple requests.
-type WriteObjectSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+type WriteObjectResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
- // Required. Destination object, including its name and its metadata.
- Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Makes the operation conditional on whether the object's current
- // generation matches the given value. Setting to 0 makes the operation
- // succeed only if there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live
- // generation does not match the given value. If no live object exists, the
- // precondition fails. Setting to 0 makes the operation succeed only if
- // there is a live version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // The expected final object size being uploaded.
- // If this value is set, closing the stream after writing fewer or more than
- // `object_size` bytes will result in an OUT_OF_RANGE error.
- //
- // This situation is considered a client error, and if such an error occurs
- // you must start the upload over from scratch, this time sending the correct
- // number of bytes.
- ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
-}
-
-func (x *WriteObjectSpec) Reset() {
- *x = WriteObjectSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *WriteObjectSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*WriteObjectSpec) ProtoMessage() {}
-
-func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
-func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *WriteObjectSpec) GetResource() *Object {
- if x != nil {
- return x.Resource
- }
- return nil
-}
-
-func (x *WriteObjectSpec) GetPredefinedAcl() string {
- if x != nil {
- return x.PredefinedAcl
- }
- return ""
-}
-
-func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
-}
-
-func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
-
-func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
+type WriteObjectResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
-}
+func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
-func (x *WriteObjectSpec) GetObjectSize() int64 {
- if x != nil && x.ObjectSize != nil {
- return *x.ObjectSize
- }
- return 0
-}
+func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
-// Request message for WriteObject.
-type WriteObjectRequest struct {
+// Request message for BidiWriteObject.
+type BidiWriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -2017,9 +1991,9 @@ type WriteObjectRequest struct {
//
// Types that are assignable to FirstMessage:
//
- // *WriteObjectRequest_UploadId
- // *WriteObjectRequest_WriteObjectSpec
- FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // *BidiWriteObjectRequest_UploadId
+ // *BidiWriteObjectRequest_WriteObjectSpec
+ FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
// Required. The offset from the beginning of the object at which the data
// should be written.
//
@@ -2032,47 +2006,62 @@ type WriteObjectRequest struct {
// first `write_offset` and the sizes of all `data` chunks sent previously on
// this stream.
//
- // An incorrect value will cause an error.
+ // An invalid value will cause an error.
WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
// A portion of the data for the object.
//
// Types that are assignable to Data:
//
- // *WriteObjectRequest_ChecksummedData
- Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // *BidiWriteObjectRequest_ChecksummedData
+ Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
// Checksums for the complete object. If the checksums computed by the service
// don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
+ // provided in last request (with finish_write set).
ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ // closes the stream, the service will send a BidiWriteObjectResponse
+ // containing the current persisted size. The persisted size sent in responses
+ // covers all the bytes the server has persisted thus far and can be used to
+ // decide what data is safe for the client to drop. Note that the object's
+ // current size reported by the BidiWriteObjectResponse may lag behind the
+ // number of bytes written by the client. This field is ignored if
+ // `finish_write` is set to true.
+ StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
+ // Persists data written on the stream, up to and including the current
+ // message, to permanent storage. This option should be used sparingly as it
+ // may reduce performance. Ongoing writes will periodically be persisted on
+ // the server even when `flush` is not set. This field is ignored if
+ // `finish_write` is set to true since there's no need to checkpoint or flush
+ // if this message completes the write.
+ Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
// If `true`, this indicates that the write is complete. Sending any
// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
// will cause an error.
// For a non-resumable write (where the upload_id was not set in the first
// message), it is an error not to set this field in the final message of the
// stream.
- FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+ FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *WriteObjectRequest) Reset() {
- *x = WriteObjectRequest{}
+func (x *BidiWriteObjectRequest) Reset() {
+ *x = BidiWriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *WriteObjectRequest) String() string {
+func (x *BidiWriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest) ProtoMessage() {}
+func (*BidiWriteObjectRequest) ProtoMessage() {}
-func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2083,108 +2072,122 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
+// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
}
-func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
if m != nil {
return m.FirstMessage
}
return nil
}
-func (x *WriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
+func (x *BidiWriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
return x.UploadId
}
return ""
}
-func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
return x.WriteObjectSpec
}
return nil
}
-func (x *WriteObjectRequest) GetWriteOffset() int64 {
+func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
if x != nil {
return x.WriteOffset
}
return 0
}
-func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
if m != nil {
return m.Data
}
return nil
}
-func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
return x.ChecksummedData
}
return nil
}
-func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
if x != nil {
return x.ObjectChecksums
}
return nil
}
-func (x *WriteObjectRequest) GetFinishWrite() bool {
+func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+ if x != nil {
+ return x.StateLookup
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFlush() bool {
+ if x != nil {
+ return x.Flush
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
if x != nil {
return x.FinishWrite
}
return false
}
-func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-type isWriteObjectRequest_FirstMessage interface {
- isWriteObjectRequest_FirstMessage()
+type isBidiWriteObjectRequest_FirstMessage interface {
+ isBidiWriteObjectRequest_FirstMessage()
}
-type WriteObjectRequest_UploadId struct {
+type BidiWriteObjectRequest_UploadId struct {
// For resumable uploads. This should be the `upload_id` returned from a
// call to `StartResumableWriteResponse`.
UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
}
-type WriteObjectRequest_WriteObjectSpec struct {
+type BidiWriteObjectRequest_WriteObjectSpec struct {
// For non-resumable uploads. Describes the overall upload, including the
// destination bucket and object name, preconditions, etc.
WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
}
-func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
-func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
-type isWriteObjectRequest_Data interface {
- isWriteObjectRequest_Data()
+type isBidiWriteObjectRequest_Data interface {
+ isBidiWriteObjectRequest_Data()
}
-type WriteObjectRequest_ChecksummedData struct {
+type BidiWriteObjectRequest_ChecksummedData struct {
// The data to insert. If a crc32c checksum is provided that doesn't match
// the checksum computed by the service, the request will fail.
ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
}
-func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
-// Response message for WriteObject.
-type WriteObjectResponse struct {
+// Response message for BidiWriteObject.
+type BidiWriteObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -2193,28 +2196,28 @@ type WriteObjectResponse struct {
//
// Types that are assignable to WriteStatus:
//
- // *WriteObjectResponse_PersistedSize
- // *WriteObjectResponse_Resource
- WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // *BidiWriteObjectResponse_PersistedSize
+ // *BidiWriteObjectResponse_Resource
+ WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *WriteObjectResponse) Reset() {
- *x = WriteObjectResponse{}
+func (x *BidiWriteObjectResponse) Reset() {
+ *x = BidiWriteObjectResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *WriteObjectResponse) String() string {
+func (x *BidiWriteObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectResponse) ProtoMessage() {}
+func (*BidiWriteObjectResponse) ProtoMessage() {}
-func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2225,134 +2228,131 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
+// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
}
-func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
+func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
if m != nil {
return m.WriteStatus
}
return nil
}
-func (x *WriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
return x.PersistedSize
}
return 0
}
-func (x *WriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
+func (x *BidiWriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
return x.Resource
}
return nil
}
-type isWriteObjectResponse_WriteStatus interface {
- isWriteObjectResponse_WriteStatus()
+type isBidiWriteObjectResponse_WriteStatus interface {
+ isBidiWriteObjectResponse_WriteStatus()
}
-type WriteObjectResponse_PersistedSize struct {
+type BidiWriteObjectResponse_PersistedSize struct {
// The total number of bytes that have been processed for the given object
// from all `WriteObject` calls. Only set if the upload has not finalized.
PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
}
-type WriteObjectResponse_Resource struct {
+type BidiWriteObjectResponse_Resource struct {
// A resource containing the metadata for the uploaded object. Only set if
// the upload has finalized.
Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
+func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
-func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
+func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
-// Request message for BidiWriteObject.
-type BidiWriteObjectRequest struct {
+// Request message for ListObjects.
+type ListObjectsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- //
- // *BidiWriteObjectRequest_UploadId
- // *BidiWriteObjectRequest_WriteObjectSpec
- FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An invalid value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- //
- // *BidiWriteObjectRequest_ChecksummedData
- Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // For each BidiWriteObjectRequest where state_lookup is `true` or the client
- // closes the stream, the service will send a BidiWriteObjectResponse
- // containing the current persisted size. The persisted size sent in responses
- // covers all the bytes the server has persisted thus far and can be used to
- // decide what data is safe for the client to drop. Note that the object's
- // current size reported by the BidiWriteObjectResponse may lag behind the
- // number of bytes written by the client. This field is ignored if
- // `finish_write` is set to true.
- StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
- // Persists data written on the stream, up to and including the current
- // message, to permanent storage. This option should be used sparingly as it
- // may reduce performance. Ongoing writes will periodically be persisted on
- // the server even when `flush` is not set. This field is ignored if
- // `finish_write` is set to true since there's no need to checkpoint or flush
- // if this message completes the write.
- Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Required. Name of the bucket in which to look for objects.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Maximum number of `items` plus `prefixes` to return
+ // in a single page of responses. As duplicate `prefixes` are
+ // omitted, fewer total results may be returned than requested. The service
+ // will use this parameter or 1,000 items, whichever is smaller.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // A previously-returned page token representing part of the larger set of
+ // results to view.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // If set, returns results in a directory-like mode. `items` will contain
+ // only objects whose names, aside from the `prefix`, do not
+ // contain `delimiter`. Objects whose names, aside from the
+ // `prefix`, contain `delimiter` will have their name,
+ // truncated after the `delimiter`, returned in
+ // `prefixes`. Duplicate `prefixes` are omitted.
+ Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
+ // If true, objects that end in exactly one instance of `delimiter`
+ // will have their metadata included in `items` in addition to
+ // `prefixes`.
+ IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
+ // Filter results to objects whose names begin with this prefix.
+ Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ // If `true`, lists all versions of an object as distinct results.
+ // For more information, see
+ // [Object
+ // Versioning](https://cloud.google.com/storage/docs/object-versioning).
+ Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
+ // Mask specifying which fields to read from each result.
+ // If no mask is specified, will default to all fields except items.acl and
+ // items.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically equal
+ // to or after lexicographic_start. If lexicographic_end is also set, the
+ // objects listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically
+ // before lexicographic_end. If lexicographic_start is also set, the objects
+ // listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
+ // Optional. If true, only list all soft-deleted versions of the object.
+ // Soft delete policy is required to set this option.
+ SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
+ // Optional. If true, will also include folders and managed folders (besides
+ // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+ IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
+ // Optional. Filter results to objects and prefixes that match this glob
+ // pattern. See [List Objects Using
+ // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+ // for the full syntax.
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
}
-func (x *BidiWriteObjectRequest) Reset() {
- *x = BidiWriteObjectRequest{}
+func (x *ListObjectsRequest) Reset() {
+ *x = ListObjectsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *BidiWriteObjectRequest) String() string {
+func (x *ListObjectsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectRequest) ProtoMessage() {}
+func (*ListObjectsRequest) ProtoMessage() {}
-func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2363,152 +2363,132 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
+// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
}
-func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *ListObjectsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
- return x.UploadId
+func (x *ListObjectsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListObjectsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
}
return ""
}
-func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
+func (x *ListObjectsRequest) GetDelimiter() string {
+ if x != nil {
+ return x.Delimiter
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
+func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
if x != nil {
- return x.WriteOffset
+ return x.IncludeTrailingDelimiter
}
- return 0
+ return false
}
-func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
- if m != nil {
- return m.Data
+func (x *ListObjectsRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
+func (x *ListObjectsRequest) GetVersions() bool {
+ if x != nil {
+ return x.Versions
}
- return nil
+ return false
}
-func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.ObjectChecksums
+ return x.ReadMask
}
return nil
}
-func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+func (x *ListObjectsRequest) GetLexicographicStart() string {
if x != nil {
- return x.StateLookup
+ return x.LexicographicStart
}
- return false
+ return ""
}
-func (x *BidiWriteObjectRequest) GetFlush() bool {
+func (x *ListObjectsRequest) GetLexicographicEnd() string {
if x != nil {
- return x.Flush
+ return x.LexicographicEnd
}
- return false
+ return ""
}
-func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
+func (x *ListObjectsRequest) GetSoftDeleted() bool {
if x != nil {
- return x.FinishWrite
+ return x.SoftDeleted
}
return false
}
-func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.IncludeFoldersAsPrefixes
}
- return nil
+ return false
}
-type isBidiWriteObjectRequest_FirstMessage interface {
- isBidiWriteObjectRequest_FirstMessage()
+func (x *ListObjectsRequest) GetMatchGlob() string {
+ if x != nil {
+ return x.MatchGlob
+ }
+ return ""
}
-type BidiWriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
-}
+// Request object for `QueryWriteStatus`.
+type QueryWriteStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type BidiWriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
-}
-
-func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
-
-func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
-
-type isBidiWriteObjectRequest_Data interface {
- isBidiWriteObjectRequest_Data()
-}
-
-type BidiWriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
-}
-
-func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
-
-// Response message for BidiWriteObject.
-type BidiWriteObjectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *BidiWriteObjectResponse_PersistedSize
- // *BidiWriteObjectResponse_Resource
- WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // Required. The name of the resume token for the object whose write status is
+ // being requested.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *BidiWriteObjectResponse) Reset() {
- *x = BidiWriteObjectResponse{}
+func (x *QueryWriteStatusRequest) Reset() {
+ *x = QueryWriteStatusRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *BidiWriteObjectResponse) String() string {
+func (x *QueryWriteStatusRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectResponse) ProtoMessage() {}
+func (*QueryWriteStatusRequest) ProtoMessage() {}
-func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2519,131 +2499,57 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
-}
-
-func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
- }
- return nil
+// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
}
-func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
+func (x *QueryWriteStatusRequest) GetUploadId() string {
+ if x != nil {
+ return x.UploadId
}
- return 0
+ return ""
}
-func (x *BidiWriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
- return x.Resource
+func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
return nil
}
-type isBidiWriteObjectResponse_WriteStatus interface {
- isBidiWriteObjectResponse_WriteStatus()
-}
-
-type BidiWriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type BidiWriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
-}
-
-func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
-
-func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
-
-// Request message for ListObjects.
-type ListObjectsRequest struct {
+// Response object for `QueryWriteStatus`.
+type QueryWriteStatusResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which to look for objects.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Maximum number of `items` plus `prefixes` to return
- // in a single page of responses. As duplicate `prefixes` are
- // omitted, fewer total results may be returned than requested. The service
- // will use this parameter or 1,000 items, whichever is smaller.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously-returned page token representing part of the larger set of
- // results to view.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, returns results in a directory-like mode. `items` will contain
- // only objects whose names, aside from the `prefix`, do not
- // contain `delimiter`. Objects whose names, aside from the
- // `prefix`, contain `delimiter` will have their name,
- // truncated after the `delimiter`, returned in
- // `prefixes`. Duplicate `prefixes` are omitted.
- Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
- // If true, objects that end in exactly one instance of `delimiter`
- // will have their metadata included in `items` in addition to
- // `prefixes`.
- IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
- // Filter results to objects whose names begin with this prefix.
- Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
- // If `true`, lists all versions of an object as distinct results.
- // For more information, see
- // [Object
- // Versioning](https://cloud.google.com/storage/docs/object-versioning).
- Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
- // Mask specifying which fields to read from each result.
- // If no mask is specified, will default to all fields except items.acl and
- // items.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically equal
- // to or after lexicographic_start. If lexicographic_end is also set, the
- // objects listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically
- // before lexicographic_end. If lexicographic_start is also set, the objects
- // listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
- // Optional. If true, only list all soft-deleted versions of the object.
- // Soft delete policy is required to set this option.
- SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
- // Optional. If true, will also include folders and managed folders (besides
- // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
- IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
- // Optional. Filter results to objects and prefixes that match this glob
- // pattern. See [List Objects Using
- // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
- // for the full syntax.
- MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *QueryWriteStatusResponse_PersistedSize
+ // *QueryWriteStatusResponse_Resource
+ WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *ListObjectsRequest) Reset() {
- *x = ListObjectsRequest{}
+func (x *QueryWriteStatusResponse) Reset() {
+ *x = QueryWriteStatusResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ListObjectsRequest) String() string {
+func (x *QueryWriteStatusResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListObjectsRequest) ProtoMessage() {}
+func (*QueryWriteStatusResponse) ProtoMessage() {}
-func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2654,250 +2560,53 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
-func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
+// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
}
-func (x *ListObjectsRequest) GetParent() string {
- if x != nil {
- return x.Parent
+func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
+func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
+ return x.PersistedSize
}
return 0
}
-func (x *ListObjectsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetDelimiter() string {
- if x != nil {
- return x.Delimiter
+func (x *QueryWriteStatusResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
+ return x.Resource
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
- if x != nil {
- return x.IncludeTrailingDelimiter
- }
- return false
+type isQueryWriteStatusResponse_WriteStatus interface {
+ isQueryWriteStatusResponse_WriteStatus()
}
-func (x *ListObjectsRequest) GetPrefix() string {
- if x != nil {
- return x.Prefix
- }
- return ""
+type QueryWriteStatusResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. This is the correct value for the
+ // 'write_offset' field to use when resuming the `WriteObject` operation.
+ // Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
}
-func (x *ListObjectsRequest) GetVersions() bool {
- if x != nil {
- return x.Versions
- }
- return false
+type QueryWriteStatusResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.ReadMask
- }
- return nil
-}
+func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
-func (x *ListObjectsRequest) GetLexicographicStart() string {
- if x != nil {
- return x.LexicographicStart
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetLexicographicEnd() string {
- if x != nil {
- return x.LexicographicEnd
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetSoftDeleted() bool {
- if x != nil {
- return x.SoftDeleted
- }
- return false
-}
-
-func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool {
- if x != nil {
- return x.IncludeFoldersAsPrefixes
- }
- return false
-}
-
-func (x *ListObjectsRequest) GetMatchGlob() string {
- if x != nil {
- return x.MatchGlob
- }
- return ""
-}
-
-// Request object for `QueryWriteStatus`.
-type QueryWriteStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the resume token for the object whose write status is
- // being requested.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
-}
-
-func (x *QueryWriteStatusRequest) Reset() {
- *x = QueryWriteStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryWriteStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryWriteStatusRequest) ProtoMessage() {}
-
-func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *QueryWriteStatusRequest) GetUploadId() string {
- if x != nil {
- return x.UploadId
- }
- return ""
-}
-
-func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
-
-// Response object for `QueryWriteStatus`.
-type QueryWriteStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *QueryWriteStatusResponse_PersistedSize
- // *QueryWriteStatusResponse_Resource
- WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
-}
-
-func (x *QueryWriteStatusResponse) Reset() {
- *x = QueryWriteStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryWriteStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryWriteStatusResponse) ProtoMessage() {}
-
-func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
-}
-
-func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
- }
- return nil
-}
-
-func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
- return x.PersistedSize
- }
- return 0
-}
-
-func (x *QueryWriteStatusResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
- return x.Resource
- }
- return nil
-}
-
-type isQueryWriteStatusResponse_WriteStatus interface {
- isQueryWriteStatusResponse_WriteStatus()
-}
-
-type QueryWriteStatusResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. This is the correct value for the
- // 'write_offset' field to use when resuming the `WriteObject` operation.
- // Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type QueryWriteStatusResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
-}
-
-func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
-
-func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
+func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
// Request message for RewriteObject.
// If the source object is encrypted using a Customer-Supplied Encryption Key
@@ -3011,7 +2720,7 @@ type RewriteObjectRequest struct {
func (x *RewriteObjectRequest) Reset() {
*x = RewriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3024,7 +2733,7 @@ func (x *RewriteObjectRequest) String() string {
func (*RewriteObjectRequest) ProtoMessage() {}
func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3037,7 +2746,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
}
func (x *RewriteObjectRequest) GetDestinationName() string {
@@ -3227,7 +2936,7 @@ type RewriteResponse struct {
func (x *RewriteResponse) Reset() {
*x = RewriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3240,7 +2949,7 @@ func (x *RewriteResponse) String() string {
func (*RewriteResponse) ProtoMessage() {}
func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3253,7 +2962,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
func (*RewriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
}
func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
@@ -3312,7 +3021,7 @@ type StartResumableWriteRequest struct {
func (x *StartResumableWriteRequest) Reset() {
*x = StartResumableWriteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3325,7 +3034,7 @@ func (x *StartResumableWriteRequest) String() string {
func (*StartResumableWriteRequest) ProtoMessage() {}
func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3338,7 +3047,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
}
func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
@@ -3376,7 +3085,7 @@ type StartResumableWriteResponse struct {
func (x *StartResumableWriteResponse) Reset() {
*x = StartResumableWriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3389,7 +3098,7 @@ func (x *StartResumableWriteResponse) String() string {
func (*StartResumableWriteResponse) ProtoMessage() {}
func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3402,7 +3111,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
}
func (x *StartResumableWriteResponse) GetUploadId() string {
@@ -3459,7 +3168,7 @@ type UpdateObjectRequest struct {
func (x *UpdateObjectRequest) Reset() {
*x = UpdateObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3472,7 +3181,7 @@ func (x *UpdateObjectRequest) String() string {
func (*UpdateObjectRequest) ProtoMessage() {}
func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3485,7 +3194,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
}
func (x *UpdateObjectRequest) GetObject() *Object {
@@ -3558,7 +3267,7 @@ type GetServiceAccountRequest struct {
func (x *GetServiceAccountRequest) Reset() {
*x = GetServiceAccountRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3571,7 +3280,7 @@ func (x *GetServiceAccountRequest) String() string {
func (*GetServiceAccountRequest) ProtoMessage() {}
func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3584,7 +3293,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead.
func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
}
func (x *GetServiceAccountRequest) GetProject() string {
@@ -3594,37 +3303,35 @@ func (x *GetServiceAccountRequest) GetProject() string {
return ""
}
-// Request message for CreateHmacKey.
-type CreateHmacKeyRequest struct {
+// A service account, owned by Cloud Storage, which may be used when taking
+// action on behalf of a given project, for example to publish Pub/Sub
+// notifications or to retrieve security keys.
+type ServiceAccount struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The project that the HMAC-owning service account lives in, in the
- // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
- // project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // Required. The service account to create the HMAC for.
- ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ // The ID of the notification.
+ EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
}
-func (x *CreateHmacKeyRequest) Reset() {
- *x = CreateHmacKeyRequest{}
+func (x *ServiceAccount) Reset() {
+ *x = ServiceAccount{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *CreateHmacKeyRequest) String() string {
+func (x *ServiceAccount) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateHmacKeyRequest) ProtoMessage() {}
+func (*ServiceAccount) ProtoMessage() {}
-func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3635,21 +3342,74 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
-}
-
-func (x *CreateHmacKeyRequest) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
+// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
+func (*ServiceAccount) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
}
-func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+func (x *ServiceAccount) GetEmailAddress() string {
if x != nil {
- return x.ServiceAccountEmail
+ return x.EmailAddress
+ }
+ return ""
+}
+
+// Request message for CreateHmacKey.
+type CreateHmacKeyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The project that the HMAC-owning service account lives in, in the
+ // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
+ // project ID or project number.
+ Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
+ // Required. The service account to create the HMAC for.
+ ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+}
+
+func (x *CreateHmacKeyRequest) Reset() {
+ *x = CreateHmacKeyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateHmacKeyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateHmacKeyRequest) ProtoMessage() {}
+
+func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
+func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *CreateHmacKeyRequest) GetProject() string {
+ if x != nil {
+ return x.Project
+ }
+ return ""
+}
+
+func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+ if x != nil {
+ return x.ServiceAccountEmail
}
return ""
}
@@ -3670,7 +3430,7 @@ type CreateHmacKeyResponse struct {
func (x *CreateHmacKeyResponse) Reset() {
*x = CreateHmacKeyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3683,7 +3443,7 @@ func (x *CreateHmacKeyResponse) String() string {
func (*CreateHmacKeyResponse) ProtoMessage() {}
func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3696,7 +3456,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead.
func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
}
func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata {
@@ -3730,7 +3490,7 @@ type DeleteHmacKeyRequest struct {
func (x *DeleteHmacKeyRequest) Reset() {
*x = DeleteHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3743,7 +3503,7 @@ func (x *DeleteHmacKeyRequest) String() string {
func (*DeleteHmacKeyRequest) ProtoMessage() {}
func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3756,7 +3516,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
}
func (x *DeleteHmacKeyRequest) GetAccessId() string {
@@ -3790,7 +3550,7 @@ type GetHmacKeyRequest struct {
func (x *GetHmacKeyRequest) Reset() {
*x = GetHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3803,7 +3563,7 @@ func (x *GetHmacKeyRequest) String() string {
func (*GetHmacKeyRequest) ProtoMessage() {}
func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3816,7 +3576,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
}
func (x *GetHmacKeyRequest) GetAccessId() string {
@@ -3856,7 +3616,7 @@ type ListHmacKeysRequest struct {
func (x *ListHmacKeysRequest) Reset() {
*x = ListHmacKeysRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3869,7 +3629,7 @@ func (x *ListHmacKeysRequest) String() string {
func (*ListHmacKeysRequest) ProtoMessage() {}
func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3882,7 +3642,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead.
func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
}
func (x *ListHmacKeysRequest) GetProject() string {
@@ -3936,7 +3696,7 @@ type ListHmacKeysResponse struct {
func (x *ListHmacKeysResponse) Reset() {
*x = ListHmacKeysResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3949,7 +3709,7 @@ func (x *ListHmacKeysResponse) String() string {
func (*ListHmacKeysResponse) ProtoMessage() {}
func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3962,7 +3722,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead.
func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
}
func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata {
@@ -4002,7 +3762,7 @@ type UpdateHmacKeyRequest struct {
func (x *UpdateHmacKeyRequest) Reset() {
*x = UpdateHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4015,7 +3775,7 @@ func (x *UpdateHmacKeyRequest) String() string {
func (*UpdateHmacKeyRequest) ProtoMessage() {}
func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4028,7 +3788,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
}
func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata {
@@ -4045,6 +3805,123 @@ func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
+// Hmac Key Metadata, which includes all information other than the secret.
+type HmacKeyMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Immutable. Resource name ID of the key in the format
+ // {projectIdentifier}/{accessId}.
+ // {projectIdentifier} can be the project ID or project number.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Immutable. Globally unique id for keys.
+ AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
+ // Immutable. Identifies the project that owns the service account of the
+ // specified HMAC key, in the format "projects/{projectIdentifier}".
+ // {projectIdentifier} can be the project ID or project number.
+ Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
+ // Output only. Email of the service account the key authenticates as.
+ ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ // Optional. State of the key. One of ACTIVE, INACTIVE, or DELETED.
+ // Writable, can be updated by UpdateHmacKey operation.
+ State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
+ // Output only. The creation time of the HMAC key.
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // Output only. The last modification time of the HMAC key metadata.
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ // Optional. The etag of the HMAC key.
+ Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
+}
+
+func (x *HmacKeyMetadata) Reset() {
+ *x = HmacKeyMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HmacKeyMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HmacKeyMetadata) ProtoMessage() {}
+
+func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
+func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *HmacKeyMetadata) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetAccessId() string {
+ if x != nil {
+ return x.AccessId
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetProject() string {
+ if x != nil {
+ return x.Project
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
+ if x != nil {
+ return x.ServiceAccountEmail
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetState() string {
+ if x != nil {
+ return x.State
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreateTime
+ }
+ return nil
+}
+
+func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.UpdateTime
+ }
+ return nil
+}
+
+func (x *HmacKeyMetadata) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
// Parameters that can be passed to any object request.
type CommonObjectRequestParams struct {
state protoimpl.MessageState
@@ -4065,7 +3942,7 @@ type CommonObjectRequestParams struct {
func (x *CommonObjectRequestParams) Reset() {
*x = CommonObjectRequestParams{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4078,7 +3955,7 @@ func (x *CommonObjectRequestParams) String() string {
func (*CommonObjectRequestParams) ProtoMessage() {}
func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4091,7 +3968,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead.
func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
}
func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string {
@@ -4125,7 +4002,7 @@ type ServiceConstants struct {
func (x *ServiceConstants) Reset() {
*x = ServiceConstants{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4138,7 +4015,7 @@ func (x *ServiceConstants) String() string {
func (*ServiceConstants) ProtoMessage() {}
func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4151,7 +4028,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead.
func (*ServiceConstants) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
}
// A bucket.
@@ -4268,7 +4145,8 @@ type Bucket struct {
// Reserved for future use.
SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"`
// Configuration that, if present, specifies the data placement for a
- // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region].
+ // [https://cloud.google.com/storage/docs/locations#location-dr][configurable
+ // dual-region].
CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"`
// The bucket's Autoclass configuration. If there is no configuration, the
// Autoclass feature will be disabled and have no effect on the bucket.
@@ -4285,7 +4163,7 @@ type Bucket struct {
func (x *Bucket) Reset() {
*x = Bucket{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4298,7 +4176,7 @@ func (x *Bucket) String() string {
func (*Bucket) ProtoMessage() {}
func (x *Bucket) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4311,7 +4189,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message {
// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
func (*Bucket) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
}
func (x *Bucket) GetName() string {
@@ -4574,7 +4452,7 @@ type BucketAccessControl struct {
func (x *BucketAccessControl) Reset() {
*x = BucketAccessControl{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4587,7 +4465,7 @@ func (x *BucketAccessControl) String() string {
func (*BucketAccessControl) ProtoMessage() {}
func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4600,7 +4478,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead.
func (*BucketAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41}
}
func (x *BucketAccessControl) GetRole() string {
@@ -4682,7 +4560,7 @@ type ChecksummedData struct {
func (x *ChecksummedData) Reset() {
*x = ChecksummedData{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4695,7 +4573,7 @@ func (x *ChecksummedData) String() string {
func (*ChecksummedData) ProtoMessage() {}
func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4708,7 +4586,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead.
func (*ChecksummedData) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42}
}
func (x *ChecksummedData) GetContent() []byte {
@@ -4741,15 +4619,15 @@ type ObjectChecksums struct {
// [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and
// ETags: Best Practices].
// Not all objects will provide an MD5 hash. For example, composite objects
- // provide only crc32c hashes.
- // This value is equivalent to running `cat object.txt | openssl md5 -binary`
+ // provide only crc32c hashes. This value is equivalent to running `cat
+ // object.txt | openssl md5 -binary`
Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"`
}
func (x *ObjectChecksums) Reset() {
*x = ObjectChecksums{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4762,7 +4640,7 @@ func (x *ObjectChecksums) String() string {
func (*ObjectChecksums) ProtoMessage() {}
func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4775,7 +4653,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead.
func (*ObjectChecksums) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43}
}
func (x *ObjectChecksums) GetCrc32C() uint32 {
@@ -4792,239 +4670,9 @@ func (x *ObjectChecksums) GetMd5Hash() []byte {
return nil
}
-// Hmac Key Metadata, which includes all information other than the secret.
-type HmacKeyMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Immutable. Resource name ID of the key in the format
- // {projectIdentifier}/{accessId}.
- // {projectIdentifier} can be the project ID or project number.
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // Immutable. Globally unique id for keys.
- AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Immutable. Identifies the project that owns the service account of the
- // specified HMAC key, in the format "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
- // Output only. Email of the service account the key authenticates as.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // State of the key. One of ACTIVE, INACTIVE, or DELETED.
- // Writable, can be updated by UpdateHmacKey operation.
- State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
- // Output only. The creation time of the HMAC key.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The last modification time of the HMAC key metadata.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // The etag of the HMAC key.
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *HmacKeyMetadata) Reset() {
- *x = HmacKeyMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HmacKeyMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HmacKeyMetadata) ProtoMessage() {}
-
-func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
-func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
-}
-
-func (x *HmacKeyMetadata) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetAccessId() string {
- if x != nil {
- return x.AccessId
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
- if x != nil {
- return x.ServiceAccountEmail
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetState() string {
- if x != nil {
- return x.State
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-// A directive to publish Pub/Sub notifications upon changes to a bucket.
-type NotificationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The resource name of this NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- // The `{project}` portion may be `_` for globally unique buckets.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The Pub/Sub topic to which this subscription publishes. Formatted
- // as:
- // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
- Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
- // The etag of the NotificationConfig.
- // If included in the metadata of GetNotificationConfigRequest, the operation
- // will only be performed if the etag matches that of the NotificationConfig.
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
- // If present, only send notifications about listed event types. If
- // empty, sent notifications for all event types.
- EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
- // A list of additional attributes to attach to each Pub/Sub
- // message published for this NotificationConfig.
- CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // If present, only apply this NotificationConfig to object names that
- // begin with this prefix.
- ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
- // Required. The desired content of the Payload.
- PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
-}
-
-func (x *NotificationConfig) Reset() {
- *x = NotificationConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NotificationConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NotificationConfig) ProtoMessage() {}
-
-func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
-func (*NotificationConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
-}
-
-func (x *NotificationConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *NotificationConfig) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEventTypes() []string {
- if x != nil {
- return x.EventTypes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetCustomAttributes() map[string]string {
- if x != nil {
- return x.CustomAttributes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetObjectNamePrefix() string {
- if x != nil {
- return x.ObjectNamePrefix
- }
- return ""
-}
-
-func (x *NotificationConfig) GetPayloadFormat() string {
- if x != nil {
- return x.PayloadFormat
- }
- return ""
-}
-
-// Describes the Customer-Supplied Encryption Key mechanism used to store an
-// Object's data at rest.
-type CustomerEncryption struct {
+// Describes the Customer-Supplied Encryption Key mechanism used to store an
+// Object's data at rest.
+type CustomerEncryption struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -5039,7 +4687,7 @@ type CustomerEncryption struct {
func (x *CustomerEncryption) Reset() {
*x = CustomerEncryption{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5052,7 +4700,7 @@ func (x *CustomerEncryption) String() string {
func (*CustomerEncryption) ProtoMessage() {}
func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5065,7 +4713,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead.
func (*CustomerEncryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44}
}
func (x *CustomerEncryption) GetEncryptionAlgorithm() string {
@@ -5148,7 +4796,10 @@ type Object struct {
// Components are accumulated by compose operations.
ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"`
// Output only. Hashes for the data part of this object. This field is used
- // for output only and will be silently ignored if provided in requests.
+ // for output only and will be silently ignored if provided in requests. The
+ // checksums of the complete object regardless of data range. If the object is
+ // downloaded in full, the client should compute one of these checksums over
+ // the downloaded object and compare it against the value provided here.
Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"`
// Output only. The modification time of the object metadata.
// Set initially to object creation time and then updated whenever any
@@ -5214,7 +4865,7 @@ type Object struct {
func (x *Object) Reset() {
*x = Object{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5227,7 +4878,7 @@ func (x *Object) String() string {
func (*Object) ProtoMessage() {}
func (x *Object) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5240,7 +4891,7 @@ func (x *Object) ProtoReflect() protoreflect.Message {
// Deprecated: Use Object.ProtoReflect.Descriptor instead.
func (*Object) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45}
}
func (x *Object) GetName() string {
@@ -5452,7 +5103,10 @@ type ObjectAccessControl struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The access permission for the entity.
+ // The access permission for the entity. One of the following values:
+ // * `READER`
+ // * `WRITER`
+ // * `OWNER`
Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
// The ID of the access-control entry.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
@@ -5496,7 +5150,7 @@ type ObjectAccessControl struct {
func (x *ObjectAccessControl) Reset() {
*x = ObjectAccessControl{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5509,7 +5163,7 @@ func (x *ObjectAccessControl) String() string {
func (*ObjectAccessControl) ProtoMessage() {}
func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5522,7 +5176,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead.
func (*ObjectAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
}
func (x *ObjectAccessControl) GetRole() string {
@@ -5607,7 +5261,7 @@ type ListObjectsResponse struct {
func (x *ListObjectsResponse) Reset() {
*x = ListObjectsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5620,7 +5274,7 @@ func (x *ListObjectsResponse) String() string {
func (*ListObjectsResponse) ProtoMessage() {}
func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5633,7 +5287,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead.
func (*ListObjectsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
}
func (x *ListObjectsResponse) GetObjects() []*Object {
@@ -5672,7 +5326,7 @@ type ProjectTeam struct {
func (x *ProjectTeam) Reset() {
*x = ProjectTeam{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5685,7 +5339,7 @@ func (x *ProjectTeam) String() string {
func (*ProjectTeam) ProtoMessage() {}
func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5698,7 +5352,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead.
func (*ProjectTeam) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
}
func (x *ProjectTeam) GetProjectNumber() string {
@@ -5715,57 +5369,6 @@ func (x *ProjectTeam) GetTeam() string {
return ""
}
-// A service account, owned by Cloud Storage, which may be used when taking
-// action on behalf of a given project, for example to publish Pub/Sub
-// notifications or to retrieve security keys.
-type ServiceAccount struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The ID of the notification.
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
-}
-
-func (x *ServiceAccount) Reset() {
- *x = ServiceAccount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServiceAccount) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServiceAccount) ProtoMessage() {}
-
-func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
-func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
-}
-
-func (x *ServiceAccount) GetEmailAddress() string {
- if x != nil {
- return x.EmailAddress
- }
- return ""
-}
-
// The owner of a specific resource.
type Owner struct {
state protoimpl.MessageState
@@ -5781,7 +5384,7 @@ type Owner struct {
func (x *Owner) Reset() {
*x = Owner{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5794,7 +5397,7 @@ func (x *Owner) String() string {
func (*Owner) ProtoMessage() {}
func (x *Owner) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5807,7 +5410,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message {
// Deprecated: Use Owner.ProtoReflect.Descriptor instead.
func (*Owner) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
}
func (x *Owner) GetEntity() string {
@@ -5841,7 +5444,7 @@ type ContentRange struct {
func (x *ContentRange) Reset() {
*x = ContentRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5854,7 +5457,7 @@ func (x *ContentRange) String() string {
func (*ContentRange) ProtoMessage() {}
func (x *ContentRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5867,7 +5470,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message {
// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead.
func (*ContentRange) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
}
func (x *ContentRange) GetStart() int64 {
@@ -5891,38 +5494,33 @@ func (x *ContentRange) GetCompleteLength() int64 {
return 0
}
-// Description of a source object for a composition request.
-type ComposeObjectRequest_SourceObject struct {
+// Request message for DeleteNotificationConfig.
+type DeleteNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The source object's name. All source objects must reside in the
- // same bucket.
+ // Required. The parent bucket of the NotificationConfig.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The generation of this object to use as the source.
- Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"`
- // Conditions that must be met for this operation to execute.
- ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"`
}
-func (x *ComposeObjectRequest_SourceObject) Reset() {
- *x = ComposeObjectRequest_SourceObject{}
+func (x *DeleteNotificationConfigRequest) Reset() {
+ *x = DeleteNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ComposeObjectRequest_SourceObject) String() string {
+func (x *DeleteNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
+func (*DeleteNotificationConfigRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5933,61 +5531,47 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0}
+// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
}
-func (x *ComposeObjectRequest_SourceObject) GetName() string {
+func (x *DeleteNotificationConfigRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
-func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 {
- if x != nil {
- return x.Generation
- }
- return 0
-}
-
-func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions {
- if x != nil {
- return x.ObjectPreconditions
- }
- return nil
-}
-
-// Preconditions for a source object of a composition request.
-type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
+// Request message for GetNotificationConfig.
+type GetNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Only perform the composition if the generation of the source object
- // that would be used matches this value. If this value and a generation
- // are both specified, they must be the same value or the call will fail.
- IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Required. The parent bucket of the NotificationConfig.
+ // Format:
+ // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
- *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
+func (x *GetNotificationConfigRequest) Reset() {
+ *x = GetNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
+func (x *GetNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
+func (*GetNotificationConfigRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5998,45 +5582,47 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0}
+// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *GetNotificationConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
- return 0
+ return ""
}
-// Billing properties of a bucket.
-type Bucket_Billing struct {
+// Request message for CreateNotificationConfig.
+type CreateNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // When set to true, Requester Pays is enabled for this bucket.
- RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"`
+ // Required. The bucket to which this NotificationConfig belongs.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. Properties of the NotificationConfig to be inserted.
+ NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
}
-func (x *Bucket_Billing) Reset() {
- *x = Bucket_Billing{}
+func (x *CreateNotificationConfigRequest) Reset() {
+ *x = CreateNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Billing) String() string {
+func (x *CreateNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Billing) ProtoMessage() {}
+func (*CreateNotificationConfigRequest) ProtoMessage() {}
-func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6047,62 +5633,62 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead.
-func (*Bucket_Billing) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0}
+// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
}
-func (x *Bucket_Billing) GetRequesterPays() bool {
+func (x *CreateNotificationConfigRequest) GetParent() string {
if x != nil {
- return x.RequesterPays
+ return x.Parent
}
- return false
+ return ""
}
-// Cross-Origin Response sharing (CORS) properties for a bucket.
-// For more on Cloud Storage and CORS, see
-// https://cloud.google.com/storage/docs/cross-origin.
-// For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
-type Bucket_Cors struct {
+func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
+ if x != nil {
+ return x.NotificationConfig
+ }
+ return nil
+}
+
+// Request message for ListNotifications.
+type ListNotificationConfigsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The list of Origins eligible to receive CORS response headers. See
- // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins.
- // Note: "*" is permitted in the list of origins, and means "any Origin".
- Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"`
- // The list of HTTP methods on which to include CORS response headers,
- // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of
- // methods, and means "any method".
- Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"`
- // The list of HTTP headers other than the
- // [https://www.w3.org/TR/cors/#simple-response-header][simple response
- // headers] to give permission for the user-agent to share across domains.
- ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"`
- // The value, in seconds, to return in the
- // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age
- // header] used in preflight responses.
- MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"`
+ // Required. Name of a Google Cloud Storage bucket.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The maximum number of NotificationConfigs to return. The service
+ // may return fewer than this value. The default value is 100. Specifying a
+ // value above 100 will result in a page_size of 100.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. A page token, received from a previous `ListNotificationConfigs`
+ // call. Provide this to retrieve the subsequent page.
+ //
+ // When paginating, all other parameters provided to `ListNotificationConfigs`
+ // must match the call that provided the page token.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}
-func (x *Bucket_Cors) Reset() {
- *x = Bucket_Cors{}
+func (x *ListNotificationConfigsRequest) Reset() {
+ *x = ListNotificationConfigsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Cors) String() string {
+func (x *ListNotificationConfigsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Cors) ProtoMessage() {}
+func (*ListNotificationConfigsRequest) ProtoMessage() {}
-func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6113,67 +5699,62 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
-func (*Bucket_Cors) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1}
+// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
}
-func (x *Bucket_Cors) GetOrigin() []string {
+func (x *ListNotificationConfigsRequest) GetParent() string {
if x != nil {
- return x.Origin
+ return x.Parent
}
- return nil
+ return ""
}
-func (x *Bucket_Cors) GetMethod() []string {
+func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
if x != nil {
- return x.Method
+ return x.PageSize
}
- return nil
+ return 0
}
-func (x *Bucket_Cors) GetResponseHeader() []string {
+func (x *ListNotificationConfigsRequest) GetPageToken() string {
if x != nil {
- return x.ResponseHeader
+ return x.PageToken
}
- return nil
+ return ""
}
-func (x *Bucket_Cors) GetMaxAgeSeconds() int32 {
- if x != nil {
- return x.MaxAgeSeconds
- }
- return 0
-}
-
-// Encryption properties of a bucket.
-type Bucket_Encryption struct {
+// The result of a call to ListNotificationConfigs
+type ListNotificationConfigsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The name of the Cloud KMS key that will be used to encrypt objects
- // inserted into this bucket, if no encryption method is specified.
- DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"`
+ // The list of items.
+ NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
+ // A token, which can be sent as `page_token` to retrieve the next page.
+ // If this field is omitted, there are no subsequent pages.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
-func (x *Bucket_Encryption) Reset() {
- *x = Bucket_Encryption{}
+func (x *ListNotificationConfigsResponse) Reset() {
+ *x = ListNotificationConfigsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Encryption) String() string {
+func (x *ListNotificationConfigsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Encryption) ProtoMessage() {}
+func (*ListNotificationConfigsResponse) ProtoMessage() {}
-func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6184,48 +5765,74 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead.
-func (*Bucket_Encryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2}
+// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
}
-func (x *Bucket_Encryption) GetDefaultKmsKey() string {
+func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
if x != nil {
- return x.DefaultKmsKey
+ return x.NotificationConfigs
+ }
+ return nil
+}
+
+func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
}
return ""
}
-// Bucket restriction options.
-type Bucket_IamConfig struct {
+// A directive to publish Pub/Sub notifications upon changes to a bucket.
+type NotificationConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Bucket restriction options currently enforced on the bucket.
- UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"`
- // Whether IAM will enforce public access prevention. Valid values are
- // "enforced" or "inherited".
- PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"`
+ // Required. The resource name of this NotificationConfig.
+ // Format:
+ // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
+ // The `{project}` portion may be `_` for globally unique buckets.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The Pub/Sub topic to which this subscription publishes. Formatted
+ // as:
+ // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ // Optional. The etag of the NotificationConfig.
+ // If included in the metadata of GetNotificationConfigRequest, the operation
+ // will only be performed if the etag matches that of the NotificationConfig.
+ Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
+ // Optional. If present, only send notifications about listed event types. If
+ // empty, sent notifications for all event types.
+ EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
+ // Optional. A list of additional attributes to attach to each Pub/Sub
+ // message published for this NotificationConfig.
+ CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Optional. If present, only apply this NotificationConfig to object names
+ // that begin with this prefix.
+ ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
+ // Required. The desired content of the Payload.
+ PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
}
-func (x *Bucket_IamConfig) Reset() {
- *x = Bucket_IamConfig{}
+func (x *NotificationConfig) Reset() {
+ *x = NotificationConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_IamConfig) String() string {
+func (x *NotificationConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_IamConfig) ProtoMessage() {}
+func (*NotificationConfig) ProtoMessage() {}
-func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6236,106 +5843,92 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead.
-func (*Bucket_IamConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3}
+// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
+func (*NotificationConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
}
-func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess {
+func (x *NotificationConfig) GetName() string {
if x != nil {
- return x.UniformBucketLevelAccess
+ return x.Name
}
- return nil
+ return ""
}
-func (x *Bucket_IamConfig) GetPublicAccessPrevention() string {
+func (x *NotificationConfig) GetTopic() string {
if x != nil {
- return x.PublicAccessPrevention
+ return x.Topic
}
return ""
}
-// Lifecycle properties of a bucket.
-// For more information, see https://cloud.google.com/storage/docs/lifecycle.
-type Bucket_Lifecycle struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A lifecycle management rule, which is made of an action to take and the
- // condition(s) under which the action will be taken.
- Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"`
-}
-
-func (x *Bucket_Lifecycle) Reset() {
- *x = Bucket_Lifecycle{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *NotificationConfig) GetEtag() string {
+ if x != nil {
+ return x.Etag
}
+ return ""
}
-func (x *Bucket_Lifecycle) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *NotificationConfig) GetEventTypes() []string {
+ if x != nil {
+ return x.EventTypes
+ }
+ return nil
}
-func (*Bucket_Lifecycle) ProtoMessage() {}
-
-func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *NotificationConfig) GetCustomAttributes() map[string]string {
+ if x != nil {
+ return x.CustomAttributes
}
- return mi.MessageOf(x)
+ return nil
}
-// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4}
+func (x *NotificationConfig) GetObjectNamePrefix() string {
+ if x != nil {
+ return x.ObjectNamePrefix
+ }
+ return ""
}
-func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule {
+func (x *NotificationConfig) GetPayloadFormat() string {
if x != nil {
- return x.Rule
+ return x.PayloadFormat
}
- return nil
+ return ""
}
-// Logging-related properties of a bucket.
-type Bucket_Logging struct {
+// Description of a source object for a composition request.
+type ComposeObjectRequest_SourceObject struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The destination bucket where the current bucket's logs should be placed,
- // using path format (like `projects/123456/buckets/foo`).
- LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"`
- // A prefix for log object names.
- LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"`
+ // Required. The source object's name. All source objects must reside in the
+ // same bucket.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The generation of this object to use as the source.
+ Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Conditions that must be met for this operation to execute.
+ ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"`
}
-func (x *Bucket_Logging) Reset() {
- *x = Bucket_Logging{}
+func (x *ComposeObjectRequest_SourceObject) Reset() {
+ *x = ComposeObjectRequest_SourceObject{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Logging) String() string {
+func (x *ComposeObjectRequest_SourceObject) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Logging) ProtoMessage() {}
+func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
-func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6346,61 +5939,61 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead.
-func (*Bucket_Logging) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5}
+// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0}
}
-func (x *Bucket_Logging) GetLogBucket() string {
+func (x *ComposeObjectRequest_SourceObject) GetName() string {
if x != nil {
- return x.LogBucket
+ return x.Name
}
return ""
}
-func (x *Bucket_Logging) GetLogObjectPrefix() string {
+func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 {
if x != nil {
- return x.LogObjectPrefix
+ return x.Generation
}
- return ""
+ return 0
}
-// Retention policy properties of a bucket.
-type Bucket_RetentionPolicy struct {
+func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions {
+ if x != nil {
+ return x.ObjectPreconditions
+ }
+ return nil
+}
+
+// Preconditions for a source object of a composition request.
+type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Server-determined value that indicates the time from which policy was
- // enforced and effective.
- EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
- // Once locked, an object retention policy cannot be modified.
- IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"`
- // The duration that objects need to be retained. Retention duration must be
- // greater than zero and less than 100 years. Note that enforcement of
- // retention periods less than a day is not guaranteed. Such periods should
- // only be used for testing purposes. Any `nanos` value specified will be
- // rounded down to the nearest second.
- RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
+ // Only perform the composition if the generation of the source object
+ // that would be used matches this value. If this value and a generation
+ // are both specified, they must be the same value or the call will fail.
+ IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
}
-func (x *Bucket_RetentionPolicy) Reset() {
- *x = Bucket_RetentionPolicy{}
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
+ *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_RetentionPolicy) String() string {
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_RetentionPolicy) ProtoMessage() {}
+func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
-func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6411,63 +6004,45 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead.
-func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6}
+// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0}
}
-func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EffectiveTime
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return nil
+ return 0
}
-func (x *Bucket_RetentionPolicy) GetIsLocked() bool {
- if x != nil {
- return x.IsLocked
- }
- return false
-}
-
-func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration {
- if x != nil {
- return x.RetentionDuration
- }
- return nil
-}
-
-// Soft delete policy properties of a bucket.
-type Bucket_SoftDeletePolicy struct {
+// Billing properties of a bucket.
+type Bucket_Billing struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The period of time that soft-deleted objects in the bucket must be
- // retained and cannot be permanently deleted. The duration must be greater
- // than or equal to 7 days and less than 1 year.
- RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"`
- // Time from which the policy was effective. This is service-provided.
- EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"`
+ // When set to true, Requester Pays is enabled for this bucket.
+ RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"`
}
-func (x *Bucket_SoftDeletePolicy) Reset() {
- *x = Bucket_SoftDeletePolicy{}
+func (x *Bucket_Billing) Reset() {
+ *x = Bucket_Billing{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_SoftDeletePolicy) String() string {
+func (x *Bucket_Billing) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
+func (*Bucket_Billing) ProtoMessage() {}
-func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6478,54 +6053,62 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead.
-func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7}
-}
-
-func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration {
- if x != nil {
- return x.RetentionDuration
- }
- return nil
+// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead.
+func (*Bucket_Billing) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0}
}
-func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp {
+func (x *Bucket_Billing) GetRequesterPays() bool {
if x != nil {
- return x.EffectiveTime
+ return x.RequesterPays
}
- return nil
+ return false
}
-// Properties of a bucket related to versioning.
-// For more on Cloud Storage versioning, see
-// https://cloud.google.com/storage/docs/object-versioning.
-type Bucket_Versioning struct {
+// Cross-Origin Response sharing (CORS) properties for a bucket.
+// For more on Cloud Storage and CORS, see
+// https://cloud.google.com/storage/docs/cross-origin.
+// For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
+type Bucket_Cors struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // While set to true, versioning is fully enabled for this bucket.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // The list of Origins eligible to receive CORS response headers. See
+ // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins.
+ // Note: "*" is permitted in the list of origins, and means "any Origin".
+ Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"`
+ // The list of HTTP methods on which to include CORS response headers,
+ // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of
+ // methods, and means "any method".
+ Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"`
+ // The list of HTTP headers other than the
+ // [https://www.w3.org/TR/cors/#simple-response-header][simple response
+ // headers] to give permission for the user-agent to share across domains.
+ ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"`
+ // The value, in seconds, to return in the
+ // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age
+ // header] used in preflight responses.
+ MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"`
}
-func (x *Bucket_Versioning) Reset() {
- *x = Bucket_Versioning{}
+func (x *Bucket_Cors) Reset() {
+ *x = Bucket_Cors{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Versioning) String() string {
+func (x *Bucket_Cors) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Versioning) ProtoMessage() {}
+func (*Bucket_Cors) ProtoMessage() {}
-func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6536,56 +6119,67 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead.
-func (*Bucket_Versioning) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8}
+// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
+func (*Bucket_Cors) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1}
}
-func (x *Bucket_Versioning) GetEnabled() bool {
+func (x *Bucket_Cors) GetOrigin() []string {
if x != nil {
- return x.Enabled
+ return x.Origin
}
- return false
+ return nil
}
-// Properties of a bucket related to accessing the contents as a static
-// website. For more on hosting a static website via Cloud Storage, see
-// https://cloud.google.com/storage/docs/hosting-static-website.
-type Bucket_Website struct {
+func (x *Bucket_Cors) GetMethod() []string {
+ if x != nil {
+ return x.Method
+ }
+ return nil
+}
+
+func (x *Bucket_Cors) GetResponseHeader() []string {
+ if x != nil {
+ return x.ResponseHeader
+ }
+ return nil
+}
+
+func (x *Bucket_Cors) GetMaxAgeSeconds() int32 {
+ if x != nil {
+ return x.MaxAgeSeconds
+ }
+ return 0
+}
+
+// Encryption properties of a bucket.
+type Bucket_Encryption struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // If the requested object path is missing, the service will ensure the path
- // has a trailing '/', append this suffix, and attempt to retrieve the
- // resulting object. This allows the creation of `index.html`
- // objects to represent directory pages.
- MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"`
- // If the requested object path is missing, and any
- // `mainPageSuffix` object is missing, if applicable, the service
- // will return the named object from this bucket as the content for a
- // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found]
- // result.
- NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"`
+ // The name of the Cloud KMS key that will be used to encrypt objects
+ // inserted into this bucket, if no encryption method is specified.
+ DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"`
}
-func (x *Bucket_Website) Reset() {
- *x = Bucket_Website{}
+func (x *Bucket_Encryption) Reset() {
+ *x = Bucket_Encryption{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Website) String() string {
+func (x *Bucket_Encryption) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Website) ProtoMessage() {}
+func (*Bucket_Encryption) ProtoMessage() {}
-func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6596,54 +6190,48 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead.
-func (*Bucket_Website) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9}
-}
-
-func (x *Bucket_Website) GetMainPageSuffix() string {
- if x != nil {
- return x.MainPageSuffix
- }
- return ""
+// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead.
+func (*Bucket_Encryption) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2}
}
-func (x *Bucket_Website) GetNotFoundPage() string {
+func (x *Bucket_Encryption) GetDefaultKmsKey() string {
if x != nil {
- return x.NotFoundPage
+ return x.DefaultKmsKey
}
return ""
}
-// Configuration for Custom Dual Regions. It should specify precisely two
-// eligible regions within the same Multiregion. More information on regions
-// may be found [https://cloud.google.com/storage/docs/locations][here].
-type Bucket_CustomPlacementConfig struct {
+// Bucket restriction options.
+type Bucket_IamConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // List of locations to use for data placement.
- DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"`
+ // Bucket restriction options currently enforced on the bucket.
+ UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"`
+ // Whether IAM will enforce public access prevention. Valid values are
+ // "enforced" or "inherited".
+ PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"`
}
-func (x *Bucket_CustomPlacementConfig) Reset() {
- *x = Bucket_CustomPlacementConfig{}
+func (x *Bucket_IamConfig) Reset() {
+ *x = Bucket_IamConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_CustomPlacementConfig) String() string {
+func (x *Bucket_IamConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
+func (*Bucket_IamConfig) ProtoMessage() {}
-func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
+func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6654,57 +6242,54 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead.
-func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10}
+// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead.
+func (*Bucket_IamConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3}
}
-func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
+func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess {
if x != nil {
- return x.DataLocations
+ return x.UniformBucketLevelAccess
}
return nil
}
-// Configuration for a bucket's Autoclass feature.
-type Bucket_Autoclass struct {
+func (x *Bucket_IamConfig) GetPublicAccessPrevention() string {
+ if x != nil {
+ return x.PublicAccessPrevention
+ }
+ return ""
+}
+
+// Lifecycle properties of a bucket.
+// For more information, see https://cloud.google.com/storage/docs/lifecycle.
+type Bucket_Lifecycle struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Enables Autoclass.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- // Output only. Latest instant at which the `enabled` field was set to true
- // after being disabled/unconfigured or set to false after being enabled. If
- // Autoclass is enabled when the bucket is created, the toggle_time is set
- // to the bucket creation time.
- ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
- // An object in an Autoclass bucket will eventually cool down to the
- // terminal storage class if there is no access to the object.
- // The only valid values are NEARLINE and ARCHIVE.
- TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"`
- // Output only. Latest instant at which the autoclass terminal storage class
- // was updated.
- TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"`
+ // A lifecycle management rule, which is made of an action to take and the
+ // condition(s) under which the action will be taken.
+ Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"`
}
-func (x *Bucket_Autoclass) Reset() {
- *x = Bucket_Autoclass{}
+func (x *Bucket_Lifecycle) Reset() {
+ *x = Bucket_Lifecycle{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Autoclass) String() string {
+func (x *Bucket_Lifecycle) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Autoclass) ProtoMessage() {}
+func (*Bucket_Lifecycle) ProtoMessage() {}
-func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6715,66 +6300,113 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
-func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11}
+// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4}
}
-func (x *Bucket_Autoclass) GetEnabled() bool {
+func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule {
if x != nil {
- return x.Enabled
+ return x.Rule
}
- return false
+ return nil
}
-func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
- if x != nil {
- return x.ToggleTime
+// Logging-related properties of a bucket.
+type Bucket_Logging struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The destination bucket where the current bucket's logs should be placed,
+ // using path format (like `projects/123456/buckets/foo`).
+ LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"`
+ // A prefix for log object names.
+ LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"`
+}
+
+func (x *Bucket_Logging) Reset() {
+ *x = Bucket_Logging{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (x *Bucket_Autoclass) GetTerminalStorageClass() string {
- if x != nil && x.TerminalStorageClass != nil {
- return *x.TerminalStorageClass
+func (x *Bucket_Logging) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Logging) ProtoMessage() {}
+
+func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead.
+func (*Bucket_Logging) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5}
+}
+
+func (x *Bucket_Logging) GetLogBucket() string {
+ if x != nil {
+ return x.LogBucket
}
return ""
}
-func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp {
+func (x *Bucket_Logging) GetLogObjectPrefix() string {
if x != nil {
- return x.TerminalStorageClassUpdateTime
+ return x.LogObjectPrefix
}
- return nil
+ return ""
}
-// Configuration for a bucket's hierarchical namespace feature.
-type Bucket_HierarchicalNamespace struct {
+// Retention policy properties of a bucket.
+type Bucket_RetentionPolicy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Optional. Enables the hierarchical namespace feature.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Server-determined value that indicates the time from which policy was
+ // enforced and effective.
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
+ // Once locked, an object retention policy cannot be modified.
+ IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"`
+ // The duration that objects need to be retained. Retention duration must be
+ // greater than zero and less than 100 years. Note that enforcement of
+ // retention periods less than a day is not guaranteed. Such periods should
+ // only be used for testing purposes. Any `nanos` value specified will be
+ // rounded down to the nearest second.
+ RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
}
-func (x *Bucket_HierarchicalNamespace) Reset() {
- *x = Bucket_HierarchicalNamespace{}
+func (x *Bucket_RetentionPolicy) Reset() {
+ *x = Bucket_RetentionPolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_HierarchicalNamespace) String() string {
+func (x *Bucket_RetentionPolicy) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
+func (*Bucket_RetentionPolicy) ProtoMessage() {}
-func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6785,51 +6417,63 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
-func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12}
+// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead.
+func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6}
}
-func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
+func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
if x != nil {
- return x.Enabled
+ return x.EffectiveTime
+ }
+ return nil
+}
+
+func (x *Bucket_RetentionPolicy) GetIsLocked() bool {
+ if x != nil {
+ return x.IsLocked
}
return false
}
-// Settings for Uniform Bucket level access.
-// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
-type Bucket_IamConfig_UniformBucketLevelAccess struct {
+func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration {
+ if x != nil {
+ return x.RetentionDuration
+ }
+ return nil
+}
+
+// Soft delete policy properties of a bucket.
+type Bucket_SoftDeletePolicy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // If set, access checks only use bucket-level IAM policies or above.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- // The deadline time for changing
- // `iam_config.uniform_bucket_level_access.enabled` from `true` to
- // `false`. Mutable until the specified deadline is reached, but not
- // afterward.
- LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
+ // The period of time that soft-deleted objects in the bucket must be
+ // retained and cannot be permanently deleted. The duration must be greater
+ // than or equal to 7 days and less than 1 year.
+ RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"`
+ // Time from which the policy was effective. This is service-provided.
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"`
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
- *x = Bucket_IamConfig_UniformBucketLevelAccess{}
+func (x *Bucket_SoftDeletePolicy) Reset() {
+ *x = Bucket_SoftDeletePolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
+func (x *Bucket_SoftDeletePolicy) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
+func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6840,55 +6484,54 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
-func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0}
+// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead.
+func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7}
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
+func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration {
if x != nil {
- return x.Enabled
+ return x.RetentionDuration
}
- return false
+ return nil
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
+func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp {
if x != nil {
- return x.LockTime
+ return x.EffectiveTime
}
return nil
}
-// A lifecycle Rule, combining an action to take on an object and a
-// condition which will trigger that action.
-type Bucket_Lifecycle_Rule struct {
+// Properties of a bucket related to versioning.
+// For more on Cloud Storage versioning, see
+// https://cloud.google.com/storage/docs/object-versioning.
+type Bucket_Versioning struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The action to take.
- Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
- // The condition(s) under which the action will be taken.
- Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"`
+ // While set to true, versioning is fully enabled for this bucket.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule) Reset() {
- *x = Bucket_Lifecycle_Rule{}
+func (x *Bucket_Versioning) Reset() {
+ *x = Bucket_Versioning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule) String() string {
+func (x *Bucket_Versioning) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
+func (*Bucket_Versioning) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6899,56 +6542,56 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0}
-}
-
-func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
- if x != nil {
- return x.Action
- }
- return nil
+// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead.
+func (*Bucket_Versioning) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8}
}
-func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition {
+func (x *Bucket_Versioning) GetEnabled() bool {
if x != nil {
- return x.Condition
+ return x.Enabled
}
- return nil
+ return false
}
-// An action to take on an object.
-type Bucket_Lifecycle_Rule_Action struct {
+// Properties of a bucket related to accessing the contents as a static
+// website. For more on hosting a static website via Cloud Storage, see
+// https://cloud.google.com/storage/docs/hosting-static-website.
+type Bucket_Website struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Type of the action. Currently, only `Delete`, `SetStorageClass`, and
- // `AbortIncompleteMultipartUpload` are supported.
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- // Target storage class. Required iff the type of the action is
- // SetStorageClass.
- StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"`
+ // If the requested object path is missing, the service will ensure the path
+ // has a trailing '/', append this suffix, and attempt to retrieve the
+ // resulting object. This allows the creation of `index.html`
+ // objects to represent directory pages.
+ MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"`
+ // If the requested object path is missing, and any
+ // `mainPageSuffix` object is missing, if applicable, the service
+ // will return the named object from this bucket as the content for a
+ // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found]
+ // result.
+ NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule_Action) Reset() {
- *x = Bucket_Lifecycle_Rule_Action{}
+func (x *Bucket_Website) Reset() {
+ *x = Bucket_Website{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule_Action) String() string {
+func (x *Bucket_Website) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
+func (*Bucket_Website) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6959,94 +6602,54 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0}
+// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead.
+func (*Bucket_Website) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9}
}
-func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
+func (x *Bucket_Website) GetMainPageSuffix() string {
if x != nil {
- return x.Type
+ return x.MainPageSuffix
}
return ""
}
-func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string {
+func (x *Bucket_Website) GetNotFoundPage() string {
if x != nil {
- return x.StorageClass
+ return x.NotFoundPage
}
return ""
}
-// A condition of an object which triggers some action.
-type Bucket_Lifecycle_Rule_Condition struct {
+// Configuration for Custom Dual Regions. It should specify precisely two
+// eligible regions within the same Multiregion. More information on regions
+// may be found [https://cloud.google.com/storage/docs/locations][here].
+type Bucket_CustomPlacementConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Age of an object (in days). This condition is satisfied when an
- // object reaches the specified age.
- // A value of 0 indicates that all objects immediately match this
- // condition.
- AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"`
- // This condition is satisfied when an object is created before midnight
- // of the specified date in UTC.
- CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
- // Relevant only for versioned objects. If the value is
- // `true`, this condition matches live objects; if the value
- // is `false`, it matches archived objects.
- IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"`
- // Relevant only for versioned objects. If the value is N, this
- // condition is satisfied when there are at least N versions (including
- // the live version) newer than this version of the object.
- NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"`
- // Objects having any of the storage classes specified by this condition
- // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
- // `NEARLINE`, `COLDLINE`, `STANDARD`, and
- // `DURABLE_REDUCED_AVAILABILITY`.
- MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"`
- // Number of days that have elapsed since the custom timestamp set on an
- // object.
- // The value of the field must be a nonnegative integer.
- DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"`
- // An object matches this condition if the custom timestamp set on the
- // object is before the specified date in UTC.
- CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"`
- // This condition is relevant only for versioned objects. An object
- // version satisfies this condition only if these many days have been
- // passed since it became noncurrent. The value of the field must be a
- // nonnegative integer. If it's zero, the object version will become
- // eligible for Lifecycle action as soon as it becomes noncurrent.
- DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"`
- // This condition is relevant only for versioned objects. An object
- // version satisfies this condition only if it became noncurrent before
- // the specified date in UTC.
- NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"`
- // List of object name prefixes. If any prefix exactly matches the
- // beginning of the object name, the condition evaluates to true.
- MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"`
- // List of object name suffixes. If any suffix exactly matches the
- // end of the object name, the condition evaluates to true.
- MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"`
+ // List of locations to use for data placement.
+ DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
- *x = Bucket_Lifecycle_Rule_Condition{}
+func (x *Bucket_CustomPlacementConfig) Reset() {
+ *x = Bucket_CustomPlacementConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule_Condition) String() string {
+func (x *Bucket_CustomPlacementConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
+func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7057,108 +6660,511 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1}
+// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead.
+func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10}
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
- if x != nil && x.AgeDays != nil {
- return *x.AgeDays
+func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
+ if x != nil {
+ return x.DataLocations
}
- return 0
+ return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date {
- if x != nil {
- return x.CreatedBefore
+// Configuration for a bucket's Autoclass feature.
+type Bucket_Autoclass struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables Autoclass.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Output only. Latest instant at which the `enabled` field was set to true
+ // after being disabled/unconfigured or set to false after being enabled. If
+ // Autoclass is enabled when the bucket is created, the toggle_time is set
+ // to the bucket creation time.
+ ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
+ // An object in an Autoclass bucket will eventually cool down to the
+ // terminal storage class if there is no access to the object.
+ // The only valid values are NEARLINE and ARCHIVE.
+ TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"`
+ // Output only. Latest instant at which the autoclass terminal storage class
+ // was updated.
+ TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"`
+}
+
+func (x *Bucket_Autoclass) Reset() {
+ *x = Bucket_Autoclass{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool {
- if x != nil && x.IsLive != nil {
- return *x.IsLive
+func (x *Bucket_Autoclass) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Autoclass) ProtoMessage() {}
+
+func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return false
+ return mi.MessageOf(x)
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 {
- if x != nil && x.NumNewerVersions != nil {
- return *x.NumNewerVersions
+// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
+func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 11}
+}
+
+func (x *Bucket_Autoclass) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
}
- return 0
+ return false
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string {
+func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
if x != nil {
- return x.MatchesStorageClass
+ return x.ToggleTime
}
return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 {
- if x != nil && x.DaysSinceCustomTime != nil {
- return *x.DaysSinceCustomTime
+func (x *Bucket_Autoclass) GetTerminalStorageClass() string {
+ if x != nil && x.TerminalStorageClass != nil {
+ return *x.TerminalStorageClass
}
- return 0
+ return ""
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date {
+func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp {
if x != nil {
- return x.CustomTimeBefore
+ return x.TerminalStorageClassUpdateTime
}
return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 {
- if x != nil && x.DaysSinceNoncurrentTime != nil {
- return *x.DaysSinceNoncurrentTime
+// Configuration for a bucket's hierarchical namespace feature.
+type Bucket_HierarchicalNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. Enables the hierarchical namespace feature.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+}
+
+func (x *Bucket_HierarchicalNamespace) Reset() {
+ *x = Bucket_HierarchicalNamespace{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date {
- if x != nil {
- return x.NoncurrentTimeBefore
+func (x *Bucket_HierarchicalNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
+
+func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string {
+// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
+func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 12}
+}
+
+func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
if x != nil {
- return x.MatchesPrefix
+ return x.Enabled
}
- return nil
+ return false
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string {
- if x != nil {
- return x.MatchesSuffix
+// Settings for Uniform Bucket level access.
+// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
+type Bucket_IamConfig_UniformBucketLevelAccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If set, access checks only use bucket-level IAM policies or above.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // The deadline time for changing
+ // `iam_config.uniform_bucket_level_access.enabled` from `true` to
+ // `false`. Mutable until the specified deadline is reached, but not
+ // afterward.
+ LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
+ *x = Bucket_IamConfig_UniformBucketLevelAccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-var File_google_storage_v2_storage_proto protoreflect.FileDescriptor
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
-var file_google_storage_v2_storage_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
- 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
- 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
- 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
+func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0}
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LockTime
+ }
+ return nil
+}
+
+// A lifecycle Rule, combining an action to take on an object and a
+// condition which will trigger that action.
+type Bucket_Lifecycle_Rule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The action to take.
+ Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
+ // The condition(s) under which the action will be taken.
+ Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule) Reset() {
+ *x = Bucket_Lifecycle_Rule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0}
+}
+
+func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
+ if x != nil {
+ return x.Action
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition {
+ if x != nil {
+ return x.Condition
+ }
+ return nil
+}
+
+// An action to take on an object.
+type Bucket_Lifecycle_Rule_Action struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of the action. Currently, only `Delete`, `SetStorageClass`, and
+ // `AbortIncompleteMultipartUpload` are supported.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Target storage class. Required iff the type of the action is
+ // SetStorageClass.
+ StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) Reset() {
+ *x = Bucket_Lifecycle_Rule_Action{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0}
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string {
+ if x != nil {
+ return x.StorageClass
+ }
+ return ""
+}
+
+// A condition of an object which triggers some action.
+type Bucket_Lifecycle_Rule_Condition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Age of an object (in days). This condition is satisfied when an
+ // object reaches the specified age.
+ // A value of 0 indicates that all objects immediately match this
+ // condition.
+ AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"`
+ // This condition is satisfied when an object is created before midnight
+ // of the specified date in UTC.
+ CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
+ // Relevant only for versioned objects. If the value is
+ // `true`, this condition matches live objects; if the value
+ // is `false`, it matches archived objects.
+ IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"`
+ // Relevant only for versioned objects. If the value is N, this
+ // condition is satisfied when there are at least N versions (including
+ // the live version) newer than this version of the object.
+ NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"`
+ // Objects having any of the storage classes specified by this condition
+ // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+ // `NEARLINE`, `COLDLINE`, `STANDARD`, and
+ // `DURABLE_REDUCED_AVAILABILITY`.
+ MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"`
+ // Number of days that have elapsed since the custom timestamp set on an
+ // object.
+ // The value of the field must be a nonnegative integer.
+ DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"`
+ // An object matches this condition if the custom timestamp set on the
+ // object is before the specified date in UTC.
+ CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"`
+ // This condition is relevant only for versioned objects. An object
+ // version satisfies this condition only if these many days have been
+ // passed since it became noncurrent. The value of the field must be a
+ // nonnegative integer. If it's zero, the object version will become
+ // eligible for Lifecycle action as soon as it becomes noncurrent.
+ DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"`
+ // This condition is relevant only for versioned objects. An object
+ // version satisfies this condition only if it became noncurrent before
+ // the specified date in UTC.
+ NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"`
+ // List of object name prefixes. If any prefix exactly matches the
+ // beginning of the object name, the condition evaluates to true.
+ MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"`
+ // List of object name suffixes. If any suffix exactly matches the
+ // end of the object name, the condition evaluates to true.
+ MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
+ *x = Bucket_Lifecycle_Rule_Condition{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1}
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
+ if x != nil && x.AgeDays != nil {
+ return *x.AgeDays
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date {
+ if x != nil {
+ return x.CreatedBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool {
+ if x != nil && x.IsLive != nil {
+ return *x.IsLive
+ }
+ return false
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 {
+ if x != nil && x.NumNewerVersions != nil {
+ return *x.NumNewerVersions
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string {
+ if x != nil {
+ return x.MatchesStorageClass
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 {
+ if x != nil && x.DaysSinceCustomTime != nil {
+ return *x.DaysSinceCustomTime
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date {
+ if x != nil {
+ return x.CustomTimeBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 {
+ if x != nil && x.DaysSinceNoncurrentTime != nil {
+ return *x.DaysSinceNoncurrentTime
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date {
+ if x != nil {
+ return x.NoncurrentTimeBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string {
+ if x != nil {
+ return x.MatchesPrefix
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string {
+ if x != nil {
+ return x.MatchesSuffix
+ }
+ return nil
+}
+
+var File_google_storage_v2_storage_proto protoreflect.FileDescriptor
+
+var file_google_storage_v2_storage_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69,
0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
@@ -7281,1677 +7287,1685 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70,
- 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43,
0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
- 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a,
- 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
- 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8,
- 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
- 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
- 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04,
- 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
+ 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c,
+ 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52,
+ 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a,
- 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02,
- 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66,
+ 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14,
+ 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
+ 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66,
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b,
- 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
- 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f,
+ 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d,
+ 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a,
+ 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a,
+ 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a,
+ 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
+ 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d,
+ 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
+ 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66,
+ 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f,
+ 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c,
+ 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+ 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
+ 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x22, 0xe4, 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15,
+ 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52,
+ 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65,
+ 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f,
+ 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f,
- 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22,
- 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73,
- 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d,
+ 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72,
+ 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63,
+ 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
+ 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12,
+ 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00,
+ 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e,
+ 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66,
+ 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04,
+ 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e,
+ 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65,
+ 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e,
+ 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5,
+ 0x04, 0x0a, 0x16, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
+ 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44,
+ 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66,
+ 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d,
+ 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
- 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
- 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
- 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4,
- 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
- 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66,
- 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f,
- 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
+ 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a,
+ 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65,
+ 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
+ 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f,
+ 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65,
+ 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69,
+ 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c,
+ 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f,
+ 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18,
+ 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73,
+ 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a,
+ 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65,
+ 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d,
+ 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41,
+ 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28,
+ 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b,
+ 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
+ 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69,
+ 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17,
+ 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66,
+ 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69,
+ 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a,
+ 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e,
+ 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47,
+ 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74,
+ 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12,
+ 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68,
+ 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42,
+ 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f,
- 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e,
- 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
- 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33,
- 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69,
- 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
- 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11,
- 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65,
- 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26,
- 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
- 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
- 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6,
+ 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77,
+ 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+ 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72,
+ 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69,
- 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73,
- 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12,
- 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52,
- 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69,
- 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
- 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69,
- 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
- 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61,
- 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12,
- 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01,
- 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63,
- 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67,
- 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66,
- 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c,
- 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73,
- 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67,
- 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65,
- 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69,
+ 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40,
+ 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b,
+ 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73,
- 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18,
- 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a,
- 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
- 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66,
- 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d,
- 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74,
- 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18,
- 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
- 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c,
- 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27,
- 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35,
- 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63,
- 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42,
0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21,
- 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f,
- 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72,
- 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a,
- 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70,
- 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22,
+ 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
+ 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61,
+ 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61,
+ 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61,
+ 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08,
+ 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
- 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72,
- 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a,
- 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14,
- 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c,
- 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18,
- 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10,
+ 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49,
+ 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74,
+ 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d,
+ 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b,
+ 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69,
+ 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
+ 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b,
+ 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
+ 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b,
+ 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x88, 0x03, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
+ 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20,
+ 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64,
+ 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65,
- 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a,
- 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
- 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09,
- 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d,
- 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45,
- 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73,
- 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63,
- 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
- 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d,
- 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b,
- 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d,
- 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05,
- 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a,
- 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
- 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41,
- 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80,
- 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f,
- 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01,
- 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53,
- 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
+ 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31,
+ 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67,
+ 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
+ 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79,
+ 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74,
+ 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41,
+ 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57,
+ 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53,
+ 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a,
+ 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02,
+ 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d,
+ 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41,
+ 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d,
0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41,
- 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55,
- 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49,
- 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
+ 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42,
+ 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43,
+ 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54,
+ 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
+ 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f,
- 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a,
- 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41,
- 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f,
- 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58,
- 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f,
- 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
- 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59,
- 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55,
- 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f,
+ 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27,
+ 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42,
+ 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c,
+ 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50,
+ 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+ 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45,
+ 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46,
+ 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41,
+ 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e,
+ 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f,
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f,
- 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31,
- 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42,
- 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80,
- 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43,
- 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54,
- 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41,
- 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55,
- 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45,
- 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42,
- 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42,
- 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c,
- 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f,
- 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
- 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20,
- 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64,
- 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
- 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72,
- 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12,
- 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61,
- 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41,
- 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65,
- 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18,
- 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a,
- 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
- 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55,
+ 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45,
+ 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58,
+ 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55,
+ 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41,
+ 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c,
+ 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45,
+ 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53,
+ 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53,
+ 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56,
+ 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22,
+ 0xf5, 0x23, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa,
+ 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63,
+ 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63,
+ 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52,
+ 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04,
+ 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73,
+ 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65,
+ 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07,
+ 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a,
+ 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e,
+ 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77,
+ 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77,
+ 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12,
+ 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
+ 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65,
- 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69,
+ 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d,
+ 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a,
+ 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75,
+ 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75,
+ 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61,
+ 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a,
+ 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63,
+ 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61,
+ 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f,
+ 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69,
- 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c,
+ 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04,
+ 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a,
+ 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65,
+ 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b,
+ 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67,
- 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f,
- 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c,
- 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a,
- 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61,
- 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73,
- 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50,
- 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61,
- 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61,
- 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72,
- 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15,
- 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66,
- 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12,
- 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12,
- 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
- 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f,
- 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
- 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e,
- 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
- 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1,
- 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b,
- 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c,
- 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52,
- 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12,
- 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63,
- 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69,
- 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65,
- 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79,
- 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f,
- 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c,
- 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66,
+ 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38,
+ 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
+ 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66,
+ 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65,
+ 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c,
- 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f,
- 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73,
- 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62,
- 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a,
- 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01,
- 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e,
- 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65,
- 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32,
- 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61,
- 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43,
- 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f,
- 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a,
- 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12,
- 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61,
- 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69,
- 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73,
- 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64,
- 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42,
- 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f,
+ 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72,
+ 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c,
+ 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f,
+ 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67,
+ 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44,
+ 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f,
+ 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01,
+ 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10,
+ 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f,
0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53,
+ 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01,
+ 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65,
+ 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f,
+ 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c,
- 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f,
- 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
- 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65,
- 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01,
- 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
- 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a,
- 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67,
- 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66,
- 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a,
- 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d,
- 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02,
- 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65,
- 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67,
- 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69,
+ 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
+ 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f,
+ 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f,
+ 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f,
+ 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77,
+ 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f,
+ 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f,
+ 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12,
+ 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f,
+ 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12,
+ 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f,
+ 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d,
+ 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a,
+ 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f,
+ 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a,
+ 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69,
+ 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61,
+ 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61,
+ 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f,
+ 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16,
+ 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14,
+ 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69,
0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69,
- 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88,
- 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48,
- 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
- 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42,
- 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72,
- 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39,
- 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f,
- 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e,
- 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16,
- 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65,
- 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
- 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52,
- 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
- 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
- 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07,
- 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a,
- 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f,
- 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48,
- 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe,
- 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52,
- 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa,
- 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69,
- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69,
- 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65,
- 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22,
- 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74,
- 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f,
- 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12,
- 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a,
- 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
- 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53,
- 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d,
- 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61,
- 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63,
- 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a,
- 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a,
- 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c,
- 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f,
- 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79,
- 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06,
- 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f,
- 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70,
- 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65,
+ 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63,
+ 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48,
+ 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47,
+ 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12,
+ 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72,
+ 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41,
+ 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61,
+ 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88,
+ 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a,
+ 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07,
+ 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a,
+ 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
+ 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10,
+ 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35,
+ 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa,
+ 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61,
+ 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a,
+ 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e,
+ 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69,
+ 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38,
+ 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d,
- 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f,
- 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a,
- 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e,
- 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75,
- 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f,
- 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13,
- 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72,
- 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
- 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61,
- 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d,
- 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
- 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48,
- 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a,
- 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d,
- 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22,
- 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a,
- 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e,
- 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa,
- 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f,
- 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75,
+ 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+ 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a,
- 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a,
- 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01,
- 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3,
- 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47,
- 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
- 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda,
- 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
- 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
- 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41,
- 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
+ 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b,
+ 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64,
+ 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72,
+ 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e,
+ 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56,
+ 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37,
- 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12,
- 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41,
- 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23,
- 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d,
- 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba,
- 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
- 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09,
- 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01,
+ 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x02,
+ 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64,
+ 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x68,
+ 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x22,
+ 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c,
+ 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69,
+ 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16,
+ 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
+ 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a,
+ 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b,
+ 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x22, 0xb1, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x99, 0x04, 0x0a, 0x12, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x24,
+ 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65,
+ 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x32, 0x88, 0x28, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
+ 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01,
+ 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61,
- 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d,
+ 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a,
+ 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65,
+ 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61,
+ 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12,
+ 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73,
+ 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
+ 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61,
+ 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3,
+ 0xe4, 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a,
+ 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e,
+ 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
+ 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22,
- 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61,
+ 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69,
+ 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5,
+ 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a,
- 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae,
- 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a,
- 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70,
- 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda,
- 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a,
- 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
+ 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
+ 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74,
+ 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d,
+ 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61,
+ 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x83, 0x01, 0x0a, 0x11,
+ 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b,
- 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67,
+ 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x1e, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02,
+ 0x01, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
+ 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda,
- 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61,
- 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41,
- 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f,
+ 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09,
+ 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x7a, 0x0a, 0x0d,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28,
+ 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x80, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74,
+ 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48,
+ 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x22, 0x28, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x7f, 0x0a, 0x0c, 0x4c,
+ 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67,
+ 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0xda, 0x41,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09,
+ 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0xa0, 0x01, 0x0a,
+ 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0xda, 0x41, 0x14,
+ 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d,
+ 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x12,
+ 0xa2, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12,
- 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a,
- 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a,
- 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72,
- 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
- 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a,
+ 0x2a, 0x88, 0x02, 0x01, 0x12, 0xab, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x88,
+ 0x02, 0x01, 0x12, 0xb4, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3d, 0xda, 0x41, 0x1a, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12,
+ 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x12, 0xab, 0x01, 0x0a, 0x17, 0x4c, 0x69,
+ 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0xda, 0x41,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61,
+ 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41,
- 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65,
- 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67,
- 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
+ 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64,
+ 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65,
+ 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a,
+ 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -8977,56 +8991,56 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
(*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse
(*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest
(*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest
- (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest
- (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest
- (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest
- (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest
- (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse
- (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest
- (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest
- (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest
- (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest
- (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse
- (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest
- (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest
- (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse
- (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec
- (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest
- (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse
- (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest
- (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse
- (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest
- (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest
- (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse
- (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest
- (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse
- (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest
- (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse
- (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest
- (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest
- (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest
- (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse
- (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest
- (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest
- (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest
- (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse
- (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest
- (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams
- (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants
- (*Bucket)(nil), // 44: google.storage.v2.Bucket
- (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl
- (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData
- (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums
- (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata
- (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig
- (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption
- (*Object)(nil), // 51: google.storage.v2.Object
- (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl
- (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse
- (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam
- (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount
- (*Owner)(nil), // 56: google.storage.v2.Owner
- (*ContentRange)(nil), // 57: google.storage.v2.ContentRange
+ (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest
+ (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest
+ (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest
+ (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest
+ (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse
+ (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest
+ (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest
+ (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse
+ (*WriteObjectSpec)(nil), // 16: google.storage.v2.WriteObjectSpec
+ (*WriteObjectRequest)(nil), // 17: google.storage.v2.WriteObjectRequest
+ (*WriteObjectResponse)(nil), // 18: google.storage.v2.WriteObjectResponse
+ (*BidiWriteObjectRequest)(nil), // 19: google.storage.v2.BidiWriteObjectRequest
+ (*BidiWriteObjectResponse)(nil), // 20: google.storage.v2.BidiWriteObjectResponse
+ (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest
+ (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest
+ (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse
+ (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest
+ (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse
+ (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest
+ (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse
+ (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest
+ (*GetServiceAccountRequest)(nil), // 29: google.storage.v2.GetServiceAccountRequest
+ (*ServiceAccount)(nil), // 30: google.storage.v2.ServiceAccount
+ (*CreateHmacKeyRequest)(nil), // 31: google.storage.v2.CreateHmacKeyRequest
+ (*CreateHmacKeyResponse)(nil), // 32: google.storage.v2.CreateHmacKeyResponse
+ (*DeleteHmacKeyRequest)(nil), // 33: google.storage.v2.DeleteHmacKeyRequest
+ (*GetHmacKeyRequest)(nil), // 34: google.storage.v2.GetHmacKeyRequest
+ (*ListHmacKeysRequest)(nil), // 35: google.storage.v2.ListHmacKeysRequest
+ (*ListHmacKeysResponse)(nil), // 36: google.storage.v2.ListHmacKeysResponse
+ (*UpdateHmacKeyRequest)(nil), // 37: google.storage.v2.UpdateHmacKeyRequest
+ (*HmacKeyMetadata)(nil), // 38: google.storage.v2.HmacKeyMetadata
+ (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams
+ (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants
+ (*Bucket)(nil), // 41: google.storage.v2.Bucket
+ (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl
+ (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData
+ (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums
+ (*CustomerEncryption)(nil), // 45: google.storage.v2.CustomerEncryption
+ (*Object)(nil), // 46: google.storage.v2.Object
+ (*ObjectAccessControl)(nil), // 47: google.storage.v2.ObjectAccessControl
+ (*ListObjectsResponse)(nil), // 48: google.storage.v2.ListObjectsResponse
+ (*ProjectTeam)(nil), // 49: google.storage.v2.ProjectTeam
+ (*Owner)(nil), // 50: google.storage.v2.Owner
+ (*ContentRange)(nil), // 51: google.storage.v2.ContentRange
+ (*DeleteNotificationConfigRequest)(nil), // 52: google.storage.v2.DeleteNotificationConfigRequest
+ (*GetNotificationConfigRequest)(nil), // 53: google.storage.v2.GetNotificationConfigRequest
+ (*CreateNotificationConfigRequest)(nil), // 54: google.storage.v2.CreateNotificationConfigRequest
+ (*ListNotificationConfigsRequest)(nil), // 55: google.storage.v2.ListNotificationConfigsRequest
+ (*ListNotificationConfigsResponse)(nil), // 56: google.storage.v2.ListNotificationConfigsResponse
+ (*NotificationConfig)(nil), // 57: google.storage.v2.NotificationConfig
(*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject
(*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
(*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing
@@ -9047,8 +9061,8 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
(*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule
(*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action
(*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition
- nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry
- nil, // 79: google.storage.v2.Object.MetadataEntry
+ nil, // 78: google.storage.v2.Object.MetadataEntry
+ nil, // 79: google.storage.v2.NotificationConfig.CustomAttributesEntry
(*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask
(*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp
(*durationpb.Duration)(nil), // 82: google.protobuf.Duration
@@ -9062,57 +9076,57 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
}
var file_google_storage_v2_storage_proto_depIdxs = []int32{
80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
- 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
+ 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
- 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
+ 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
+ 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
- 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
- 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
- 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
- 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
- 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
- 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
- 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
- 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
- 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
- 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
- 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
- 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
- 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
- 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
- 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
- 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
+ 46, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
+ 58, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
+ 39, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 39, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 39, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 80, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 39, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 80, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 43, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 51, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
+ 46, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 46, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
+ 16, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 43, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 16, // 26: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 43, // 27: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 28: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 29: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 30: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 80, // 31: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 39, // 32: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 33: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
+ 46, // 34: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
+ 39, // 35: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 36: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 46, // 37: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
+ 16, // 38: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 39, // 39: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 40: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 46, // 41: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
+ 80, // 42: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 39, // 43: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 38, // 44: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
+ 38, // 45: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
+ 38, // 46: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
+ 80, // 47: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 81, // 48: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
+ 81, // 49: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
+ 42, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
+ 47, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
@@ -9121,7 +9135,7 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
- 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
+ 50, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
@@ -9130,25 +9144,25 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
- 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
- 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
- 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
- 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
- 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
- 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
- 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
- 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
- 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
- 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
- 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
- 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
- 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
- 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
- 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
- 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
- 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 49, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 47, // 70: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
+ 81, // 71: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
+ 81, // 72: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
+ 44, // 73: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
+ 81, // 74: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
+ 81, // 75: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
+ 81, // 76: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
+ 78, // 77: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
+ 50, // 78: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
+ 45, // 79: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
+ 81, // 80: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
+ 81, // 81: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
+ 81, // 82: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
+ 49, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 46, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 57, // 85: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
+ 57, // 86: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
+ 79, // 87: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
@@ -9173,61 +9187,61 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
- 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
- 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
- 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
- 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
- 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
- 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
- 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
- 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
- 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
- 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
- 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
- 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
- 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
- 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
- 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
- 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
- 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
- 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
- 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
- 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
- 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
- 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
- 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
+ 8, // 112: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
+ 9, // 113: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
+ 10, // 114: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
+ 11, // 115: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
+ 14, // 116: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
+ 13, // 117: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
+ 28, // 118: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
+ 17, // 119: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
+ 19, // 120: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
+ 21, // 121: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
+ 24, // 122: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
+ 26, // 123: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
+ 22, // 124: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
+ 29, // 125: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
+ 31, // 126: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
+ 33, // 127: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
+ 34, // 128: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
+ 35, // 129: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
+ 37, // 130: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
+ 52, // 131: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
+ 53, // 132: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
+ 54, // 133: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
+ 55, // 134: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
- 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
- 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
+ 41, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
+ 41, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
- 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
+ 41, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
- 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
- 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
- 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
- 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
- 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
- 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
- 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
- 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
- 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
- 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
- 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
- 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
- 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
- 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
- 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
- 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
- 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
- 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
- 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
- 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 41, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
+ 46, // 144: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
+ 87, // 145: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
+ 46, // 146: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
+ 12, // 147: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
+ 46, // 148: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
+ 15, // 149: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
+ 46, // 150: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
+ 18, // 151: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
+ 20, // 152: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
+ 48, // 153: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
+ 25, // 154: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
+ 27, // 155: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
+ 23, // 156: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
+ 30, // 157: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
+ 32, // 158: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
+ 87, // 159: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
+ 38, // 160: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 36, // 161: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
+ 38, // 162: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 87, // 163: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
+ 57, // 164: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
+ 57, // 165: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
+ 56, // 166: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
135, // [135:167] is the sub-list for method output_type
103, // [103:135] is the sub-list for method input_type
103, // [103:103] is the sub-list for extension type_name
@@ -9326,7 +9340,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteNotificationConfigRequest); i {
+ switch v := v.(*ComposeObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9338,7 +9352,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*GetNotificationConfigRequest); i {
+ switch v := v.(*DeleteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9350,7 +9364,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*CreateNotificationConfigRequest); i {
+ switch v := v.(*RestoreObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9362,7 +9376,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ListNotificationConfigsRequest); i {
+ switch v := v.(*CancelResumableWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -9374,7 +9388,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ListNotificationConfigsResponse); i {
+ switch v := v.(*CancelResumableWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9386,7 +9400,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*ComposeObjectRequest); i {
+ switch v := v.(*ReadObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9398,7 +9412,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteObjectRequest); i {
+ switch v := v.(*GetObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9410,7 +9424,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreObjectRequest); i {
+ switch v := v.(*ReadObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9422,7 +9436,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*CancelResumableWriteRequest); i {
+ switch v := v.(*WriteObjectSpec); i {
case 0:
return &v.state
case 1:
@@ -9434,7 +9448,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*CancelResumableWriteResponse); i {
+ switch v := v.(*WriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9446,7 +9460,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*ReadObjectRequest); i {
+ switch v := v.(*WriteObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9458,7 +9472,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*GetObjectRequest); i {
+ switch v := v.(*BidiWriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9470,7 +9484,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*ReadObjectResponse); i {
+ switch v := v.(*BidiWriteObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9482,7 +9496,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectSpec); i {
+ switch v := v.(*ListObjectsRequest); i {
case 0:
return &v.state
case 1:
@@ -9494,7 +9508,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectRequest); i {
+ switch v := v.(*QueryWriteStatusRequest); i {
case 0:
return &v.state
case 1:
@@ -9506,7 +9520,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectResponse); i {
+ switch v := v.(*QueryWriteStatusResponse); i {
case 0:
return &v.state
case 1:
@@ -9518,7 +9532,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*BidiWriteObjectRequest); i {
+ switch v := v.(*RewriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9530,7 +9544,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*BidiWriteObjectResponse); i {
+ switch v := v.(*RewriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9542,7 +9556,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ListObjectsRequest); i {
+ switch v := v.(*StartResumableWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -9554,7 +9568,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*QueryWriteStatusRequest); i {
+ switch v := v.(*StartResumableWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9566,7 +9580,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*QueryWriteStatusResponse); i {
+ switch v := v.(*UpdateObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9578,7 +9592,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*RewriteObjectRequest); i {
+ switch v := v.(*GetServiceAccountRequest); i {
case 0:
return &v.state
case 1:
@@ -9590,7 +9604,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*RewriteResponse); i {
+ switch v := v.(*ServiceAccount); i {
case 0:
return &v.state
case 1:
@@ -9602,7 +9616,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*StartResumableWriteRequest); i {
+ switch v := v.(*CreateHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9614,7 +9628,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*StartResumableWriteResponse); i {
+ switch v := v.(*CreateHmacKeyResponse); i {
case 0:
return &v.state
case 1:
@@ -9626,7 +9640,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateObjectRequest); i {
+ switch v := v.(*DeleteHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9638,7 +9652,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*GetServiceAccountRequest); i {
+ switch v := v.(*GetHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9650,7 +9664,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any {
- switch v := v.(*CreateHmacKeyRequest); i {
+ switch v := v.(*ListHmacKeysRequest); i {
case 0:
return &v.state
case 1:
@@ -9662,7 +9676,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any {
- switch v := v.(*CreateHmacKeyResponse); i {
+ switch v := v.(*ListHmacKeysResponse); i {
case 0:
return &v.state
case 1:
@@ -9674,7 +9688,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteHmacKeyRequest); i {
+ switch v := v.(*UpdateHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9686,7 +9700,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any {
- switch v := v.(*GetHmacKeyRequest); i {
+ switch v := v.(*HmacKeyMetadata); i {
case 0:
return &v.state
case 1:
@@ -9698,7 +9712,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any {
- switch v := v.(*ListHmacKeysRequest); i {
+ switch v := v.(*CommonObjectRequestParams); i {
case 0:
return &v.state
case 1:
@@ -9710,7 +9724,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any {
- switch v := v.(*ListHmacKeysResponse); i {
+ switch v := v.(*ServiceConstants); i {
case 0:
return &v.state
case 1:
@@ -9722,7 +9736,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateHmacKeyRequest); i {
+ switch v := v.(*Bucket); i {
case 0:
return &v.state
case 1:
@@ -9734,7 +9748,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any {
- switch v := v.(*CommonObjectRequestParams); i {
+ switch v := v.(*BucketAccessControl); i {
case 0:
return &v.state
case 1:
@@ -9746,7 +9760,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceConstants); i {
+ switch v := v.(*ChecksummedData); i {
case 0:
return &v.state
case 1:
@@ -9758,7 +9772,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any {
- switch v := v.(*Bucket); i {
+ switch v := v.(*ObjectChecksums); i {
case 0:
return &v.state
case 1:
@@ -9770,7 +9784,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any {
- switch v := v.(*BucketAccessControl); i {
+ switch v := v.(*CustomerEncryption); i {
case 0:
return &v.state
case 1:
@@ -9782,7 +9796,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any {
- switch v := v.(*ChecksummedData); i {
+ switch v := v.(*Object); i {
case 0:
return &v.state
case 1:
@@ -9794,7 +9808,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any {
- switch v := v.(*ObjectChecksums); i {
+ switch v := v.(*ObjectAccessControl); i {
case 0:
return &v.state
case 1:
@@ -9806,7 +9820,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any {
- switch v := v.(*HmacKeyMetadata); i {
+ switch v := v.(*ListObjectsResponse); i {
case 0:
return &v.state
case 1:
@@ -9818,7 +9832,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any {
- switch v := v.(*NotificationConfig); i {
+ switch v := v.(*ProjectTeam); i {
case 0:
return &v.state
case 1:
@@ -9830,7 +9844,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any {
- switch v := v.(*CustomerEncryption); i {
+ switch v := v.(*Owner); i {
case 0:
return &v.state
case 1:
@@ -9842,7 +9856,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any {
- switch v := v.(*Object); i {
+ switch v := v.(*ContentRange); i {
case 0:
return &v.state
case 1:
@@ -9854,7 +9868,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any {
- switch v := v.(*ObjectAccessControl); i {
+ switch v := v.(*DeleteNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9866,7 +9880,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any {
- switch v := v.(*ListObjectsResponse); i {
+ switch v := v.(*GetNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9878,7 +9892,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any {
- switch v := v.(*ProjectTeam); i {
+ switch v := v.(*CreateNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9890,7 +9904,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceAccount); i {
+ switch v := v.(*ListNotificationConfigsRequest); i {
case 0:
return &v.state
case 1:
@@ -9902,7 +9916,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any {
- switch v := v.(*Owner); i {
+ switch v := v.(*ListNotificationConfigsResponse); i {
case 0:
return &v.state
case 1:
@@ -9914,7 +9928,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any {
- switch v := v.(*ContentRange); i {
+ switch v := v.(*NotificationConfig); i {
case 0:
return &v.state
case 1:
@@ -10158,40 +10172,40 @@ func file_google_storage_v2_storage_proto_init() {
file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{
(*WriteObjectRequest_UploadId)(nil),
(*WriteObjectRequest_WriteObjectSpec)(nil),
(*WriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{
(*WriteObjectResponse_PersistedSize)(nil),
(*WriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{
(*BidiWriteObjectRequest_UploadId)(nil),
(*BidiWriteObjectRequest_WriteObjectSpec)(nil),
(*BidiWriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{
(*BidiWriteObjectResponse_PersistedSize)(nil),
(*BidiWriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{
(*QueryWriteStatusResponse_PersistedSize)(nil),
(*QueryWriteStatusResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{}
@@ -10247,25 +10261,16 @@ type StorageClient interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10393,18 +10398,39 @@ type StorageClient interface {
// object name, the sequence of returned `persisted_size` values will be
// non-decreasing.
QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error)
+ // Deprecated: Do not use.
// Retrieves the name of a project's Google Cloud Storage service account.
GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error)
+ // Deprecated: Do not use.
// Creates a new HMAC key for the given service account.
CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
+ // Deprecated: Do not use.
// Deletes a given HMAC key. Key must be in an INACTIVE state.
DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
// Lists HMAC keys under a given project with the additional filters provided.
ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error)
+ // Deprecated: Do not use.
// Updates a given HMAC key state between ACTIVE and INACTIVE.
UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
+ // Permanently deletes a NotificationConfig.
+ DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
+ // View a NotificationConfig.
+ GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Creates a NotificationConfig for a given bucket.
+ // These NotificationConfigs, when triggered, publish messages to the
+ // specified Pub/Sub topics. See
+ // https://cloud.google.com/storage/docs/pubsub-notifications.
+ CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Retrieves a list of NotificationConfigs for a given bucket.
+ ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
}
type storageClient struct {
@@ -10496,42 +10522,6 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques
return out, nil
}
-func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
- out := new(ListNotificationConfigsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) {
out := new(Object)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...)
@@ -10719,54 +10709,100 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat
return out, nil
}
-func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
- out := new(ServiceAccount)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
+ out := new(ServiceAccount)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
+ out := new(CreateHmacKeyResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
+ out := new(HmacKeyMetadata)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
+ out := new(ListHmacKeysResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
- out := new(CreateHmacKeyResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
+ out := new(HmacKeyMetadata)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+// Deprecated: Do not use.
+func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
+ out := new(NotificationConfig)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
- out := new(ListHmacKeysResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
+ out := new(NotificationConfig)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
+ out := new(ListNotificationConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10793,25 +10829,16 @@ type StorageServer interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error)
@@ -10939,18 +10966,39 @@ type StorageServer interface {
// object name, the sequence of returned `persisted_size` values will be
// non-decreasing.
QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error)
+ // Deprecated: Do not use.
// Retrieves the name of a project's Google Cloud Storage service account.
GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error)
+ // Deprecated: Do not use.
// Creates a new HMAC key for the given service account.
CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error)
+ // Deprecated: Do not use.
// Deletes a given HMAC key. Key must be in an INACTIVE state.
DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
// Lists HMAC keys under a given project with the additional filters provided.
ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error)
+ // Deprecated: Do not use.
// Updates a given HMAC key state between ACTIVE and INACTIVE.
UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
+ // Permanently deletes a NotificationConfig.
+ DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
+ // View a NotificationConfig.
+ GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Creates a NotificationConfig for a given bucket.
+ // These NotificationConfigs, when triggered, publish messages to the
+ // specified Pub/Sub topics. See
+ // https://cloud.google.com/storage/docs/pubsub-notifications.
+ CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Retrieves a list of NotificationConfigs for a given bucket.
+ ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
}
// UnimplementedStorageServer can be embedded to have forward compatible implementations.
@@ -10984,18 +11032,6 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.Te
func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
}
-func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
-}
func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) {
return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
}
@@ -11053,6 +11089,18 @@ func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRe
func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented")
}
+func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
+}
func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
s.RegisterService(&_Storage_serviceDesc, srv)
@@ -11220,78 +11268,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun
return interceptor(ctx, in, info, handler)
}
-func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListNotificationConfigsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListNotificationConfigs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ComposeObjectRequest)
if err := dec(in); err != nil {
@@ -11653,6 +11629,78 @@ func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
+func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).GetNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).CreateNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).ListNotificationConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Storage_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.storage.v2.Storage",
HandlerType: (*StorageServer)(nil),
@@ -11693,22 +11741,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateBucket",
Handler: _Storage_UpdateBucket_Handler,
},
- {
- MethodName: "DeleteNotificationConfig",
- Handler: _Storage_DeleteNotificationConfig_Handler,
- },
- {
- MethodName: "GetNotificationConfig",
- Handler: _Storage_GetNotificationConfig_Handler,
- },
- {
- MethodName: "CreateNotificationConfig",
- Handler: _Storage_CreateNotificationConfig_Handler,
- },
- {
- MethodName: "ListNotificationConfigs",
- Handler: _Storage_ListNotificationConfigs_Handler,
- },
{
MethodName: "ComposeObject",
Handler: _Storage_ComposeObject_Handler,
@@ -11773,6 +11805,22 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateHmacKey",
Handler: _Storage_UpdateHmacKey_Handler,
},
+ {
+ MethodName: "DeleteNotificationConfig",
+ Handler: _Storage_DeleteNotificationConfig_Handler,
+ },
+ {
+ MethodName: "GetNotificationConfig",
+ Handler: _Storage_GetNotificationConfig_Handler,
+ },
+ {
+ MethodName: "CreateNotificationConfig",
+ Handler: _Storage_CreateNotificationConfig_Handler,
+ },
+ {
+ MethodName: "ListNotificationConfigs",
+ Handler: _Storage_ListNotificationConfigs_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index e5b2de09172f5..c07789c9be782 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.43.0"
+const Version = "1.44.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index de57b4bbbbc72..99783f3df47b6 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -74,7 +74,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
}
attempts++
- return !errorFunc(err), err
+ retryable := errorFunc(err)
+ // Explicitly check context cancellation so that we can distinguish between a
+ // DEADLINE_EXCEEDED error from the server and a user-set context deadline.
+ // Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's
+ // sent by the server) in both cases.
+ if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
+ retryable = false
+ }
+ return !retryable, err
})
}
@@ -84,21 +92,7 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
- // TODO: remove this once the respective transport packages merge xGoogHeader.
- // Also remove gl-go at that time, as it will be repeated.
- hdrs := callctx.HeadersFromContext(ctx)
- for _, v := range hdrs[xGoogHeaderKey] {
- xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
- }
-
- if hdrs[xGoogHeaderKey] != nil {
- // Replace the key instead of adding it, if there was anything to merge with.
- hdrs[xGoogHeaderKey] = []string{xGoogHeader}
- } else {
- // TODO: keep this line when removing the above code.
- ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
- }
-
+ ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
return ctx
}
@@ -138,14 +132,18 @@ func ShouldRetry(err error) bool {
return true
}
}
+ case *net.DNSError:
+ if e.IsTemporary {
+ return true
+ }
case interface{ Temporary() bool }:
if e.Temporary() {
return true
}
}
- // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC.
+ // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC.
if st, ok := status.FromError(err); ok {
- if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal {
+ if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded {
return true
}
}
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
index 1d6cfdf5984d4..bc15900f01ead 100644
--- a/vendor/cloud.google.com/go/storage/notifications.go
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -21,7 +21,6 @@ import (
"regexp"
"cloud.google.com/go/internal/trace"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
@@ -92,30 +91,6 @@ func toNotification(rn *raw.Notification) *Notification {
return n
}
-func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
- n := &Notification{
- ID: pbn.GetName(),
- EventTypes: pbn.GetEventTypes(),
- ObjectNamePrefix: pbn.GetObjectNamePrefix(),
- CustomAttributes: pbn.GetCustomAttributes(),
- PayloadFormat: pbn.GetPayloadFormat(),
- }
- n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
- return n
-}
-
-func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
- return &storagepb.NotificationConfig{
- Name: n.ID,
- Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
- n.TopicProjectID, n.TopicID),
- EventTypes: n.EventTypes,
- ObjectNamePrefix: n.ObjectNamePrefix,
- CustomAttributes: n.CustomAttributes,
- PayloadFormat: n.PayloadFormat,
- }
-}
-
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
// parseNotificationTopic extracts the project and topic IDs from from the full
@@ -144,6 +119,7 @@ func toRawNotification(n *Notification) *raw.Notification {
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
+// Note: gRPC is not supported.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
@@ -165,6 +141,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
@@ -182,15 +159,8 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
return m
}
-func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
- m := map[string]*Notification{}
- for _, n := range ns {
- m[n.Name] = toNotificationFromProto(n)
- }
- return m
-}
-
// DeleteNotification deletes the notification with the given ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index debdb0f52d510..0fc82ed590642 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -22,8 +22,9 @@ import (
// storageConfig contains the Storage client option configuration that can be
// set through storageClientOptions.
type storageConfig struct {
- useJSONforReads bool
- readAPIWasSet bool
+ useJSONforReads bool
+ readAPIWasSet bool
+ disableClientMetrics bool
}
// newStorageConfig generates a new storageConfig with all the given
@@ -78,3 +79,32 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
c.useJSONforReads = w.useJSON
c.readAPIWasSet = true
}
+
+type withDisabledClientMetrics struct {
+ internaloption.EmbeddableAdapter
+ disabledClientMetrics bool
+}
+
+// WithDisabledClientMetrics is an option that may be passed to [NewClient].
+// gRPC metrics are enabled by default in the GCS client and will export the
+// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+// [Google Cloud Monitoring]. The option is used to disable metrics.
+// Google Cloud Support can use this information to more quickly diagnose
+// problems related to GCS and gRPC.
+// Sending this data does not incur any billing charges, and requires minimal
+// CPU (a single RPC every few minutes) or memory (a few KiB to batch the
+// telemetry).
+//
+// The default is to enable client metrics. To opt-out of metrics collected use
+// this option.
+//
+// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
+func WithDisabledClientMetrics() option.ClientOption {
+ return &withDisabledClientMetrics{disabledClientMetrics: true}
+}
+
+func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) {
+ c.disableClientMetrics = w.disabledClientMetrics
+}
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 6da2432f004ca..e1d96659282b5 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -65,6 +65,19 @@ type ReaderObjectAttrs struct {
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
+
+ // CRC32C is the CRC32 checksum of the entire object's content using the
+ // Castagnoli93 polynomial, if available.
+ CRC32C uint32
+
+ // Decompressed is true if the object is stored as a gzip file and was
+ // decompressed when read.
+ // Objects are automatically decompressed if the object's metadata property
+ // "Content-Encoding" is set to "gzip" or satisfies decompressive
+ // transcoding as per https://cloud.google.com/storage/docs/transcoding.
+ //
+ // To prevent decompression on reads, use [ObjectHandle.ReadCompressed].
+ Decompressed bool
}
// NewReader creates a new Reader to read the contents of the
@@ -91,7 +104,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
-// Google Cloud Storage dictates.
+// Google Cloud Storage dictates. If decompressive transcoding occurs,
+// [Reader.Attrs.Decompressed] will be true.
//
// By default, reads are made using the Cloud Storage XML API. We recommend
// using the JSON API instead, which can be done by setting [WithJSONReads]
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index b6316fa668f9a..0754dfef0bec8 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -1695,7 +1695,6 @@ type Query struct {
// IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of
// prefixes returned by the query. Only applicable if Delimiter is set to /.
- // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API.
IncludeFoldersAsPrefixes bool
// SoftDeleted indicates whether to list soft-deleted objects.
@@ -2350,6 +2349,7 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
+// Note: gRPC is not supported.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
new file mode 100644
index 0000000000000..9515ee52055c5
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
@@ -0,0 +1,3 @@
+# GCP Resource detection library
+
+This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
new file mode 100644
index 0000000000000..0a36807033432
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
@@ -0,0 +1,76 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
+ // for the environment variables available in GAE environments.
+ gaeServiceEnv = "GAE_SERVICE"
+ gaeVersionEnv = "GAE_VERSION"
+ gaeInstanceEnv = "GAE_INSTANCE"
+ gaeEnv = "GAE_ENV"
+ gaeStandard = "standard"
+)
+
+func (d *Detector) onAppEngineStandard() bool {
+ // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables.
+ env, found := d.os.LookupEnv(gaeEnv)
+ return found && env == gaeStandard
+}
+
+func (d *Detector) onAppEngine() bool {
+ _, found := d.os.LookupEnv(gaeServiceEnv)
+ return found
+}
+
+// AppEngineServiceName returns the service name of the app engine service.
+func (d *Detector) AppEngineServiceName() (string, error) {
+ if name, found := d.os.LookupEnv(gaeServiceEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceVersion returns the service version of the app engine service.
+func (d *Detector) AppEngineServiceVersion() (string, error) {
+ if version, found := d.os.LookupEnv(gaeVersionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceInstance returns the service instance of the app engine service.
+func (d *Detector) AppEngineServiceInstance() (string, error) {
+ if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) {
+ // The GCE metadata server is available on App Engine Flex.
+ return d.GCEAvailabilityZoneAndRegion()
+}
+
+// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
+func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
+ return d.metadata.Zone()
+}
+
+// AppEngineStandardCloudRegion returns the region the app engine service is running in.
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) {
+ return d.FaaSCloudRegion()
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
new file mode 100644
index 0000000000000..d3992a4f7e417
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ bmsProjectIDEnv = "BMS_PROJECT_ID"
+ bmsRegionEnv = "BMS_REGION"
+ bmsInstanceIDEnv = "BMS_INSTANCE_ID"
+)
+
+// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
+// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
+// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
+func (d *Detector) onBareMetalSolution() bool {
+ projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
+ region, regionExists := d.os.LookupEnv(bmsRegionEnv)
+ instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
+ return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
+}
+
+// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
+func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
+ if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
+ if region, found := d.os.LookupEnv(bmsRegionEnv); found {
+ return region, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
+func (d *Detector) BareMetalSolutionProjectID() (string, error) {
+ if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
+ return project, nil
+ }
+ return "", errEnvVarNotFound
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
new file mode 100644
index 0000000000000..2cc62de09766b
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -0,0 +1,102 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "errors"
+ "os"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+var errEnvVarNotFound = errors.New("environment variable not found")
+
+// NewDetector returns a *Detector which can get detect the platform,
+// and fetch attributes of the platform on which it is running.
+func NewDetector() *Detector {
+ return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}}
+}
+
+type Platform int64
+
+const (
+ UnknownPlatform Platform = iota
+ GKE
+ GCE
+ CloudRun
+ CloudRunJob
+ CloudFunctions
+ AppEngineStandard
+ AppEngineFlex
+ BareMetalSolution
+)
+
+// CloudPlatform returns the platform on which this program is running.
+func (d *Detector) CloudPlatform() Platform {
+ switch {
+ case d.onBareMetalSolution():
+ return BareMetalSolution
+ case d.onGKE():
+ return GKE
+ case d.onCloudFunctions():
+ return CloudFunctions
+ case d.onCloudRun():
+ return CloudRun
+ case d.onCloudRunJob():
+ return CloudRunJob
+ case d.onAppEngineStandard():
+ return AppEngineStandard
+ case d.onAppEngine():
+ return AppEngineFlex
+ case d.onGCE():
+ return GCE
+ }
+ return UnknownPlatform
+}
+
+// ProjectID returns the ID of the project in which this program is running.
+func (d *Detector) ProjectID() (string, error) {
+ return d.metadata.ProjectID()
+}
+
+// Detector collects resource information for all GCP platforms.
+type Detector struct {
+ metadata metadataProvider
+ os osProvider
+}
+
+// metadataProvider contains the subset of the metadata.Client functions used
+// by this resource Detector to allow testing with a fake implementation.
+type metadataProvider interface {
+ ProjectID() (string, error)
+ InstanceID() (string, error)
+ Get(string) (string, error)
+ InstanceName() (string, error)
+ Hostname() (string, error)
+ Zone() (string, error)
+ InstanceAttributeValue(string) (string, error)
+}
+
+// osProvider contains the subset of the os package functions used by.
+type osProvider interface {
+ LookupEnv(string) (string, bool)
+}
+
+// realOSProvider uses the os package to lookup env vars.
+type realOSProvider struct{}
+
+func (realOSProvider) LookupEnv(env string) (string, bool) {
+ return os.LookupEnv(env)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
new file mode 100644
index 0000000000000..9277608dd6fc5
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
@@ -0,0 +1,105 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "strings"
+)
+
+const (
+ // Cloud Functions env vars:
+ // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes
+ //
+ // Cloud Run env vars:
+ // https://cloud.google.com/run/docs/container-contract#services-env-vars
+ //
+ // Cloud Run jobs env vars:
+ // https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ cloudFunctionsTargetEnv = "FUNCTION_TARGET"
+ cloudRunConfigurationEnv = "K_CONFIGURATION"
+ cloudRunJobsEnv = "CLOUD_RUN_JOB"
+ faasServiceEnv = "K_SERVICE"
+ faasRevisionEnv = "K_REVISION"
+ cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION"
+ cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX"
+ regionMetadataAttr = "instance/region"
+)
+
+func (d *Detector) onCloudFunctions() bool {
+ _, found := d.os.LookupEnv(cloudFunctionsTargetEnv)
+ return found
+}
+
+func (d *Detector) onCloudRun() bool {
+ _, found := d.os.LookupEnv(cloudRunConfigurationEnv)
+ return found
+}
+
+func (d *Detector) onCloudRunJob() bool {
+ _, found := d.os.LookupEnv(cloudRunJobsEnv)
+ return found
+}
+
+// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service.
+func (d *Detector) FaaSName() (string, error) {
+ if name, found := d.os.LookupEnv(faasServiceEnv); found {
+ return name, nil
+ }
+ if name, found := d.os.LookupEnv(cloudRunJobsEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service.
+func (d *Detector) FaaSVersion() (string, error) {
+ if version, found := d.os.LookupEnv(faasRevisionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobExecution returns the execution id of the Cloud Run jobs.
+func (d *Detector) CloudRunJobExecution() (string, error) {
+ if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found {
+ return eid, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs.
+func (d *Detector) CloudRunJobTaskIndex() (string, error) {
+ if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found {
+ return tidx, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSID returns the instance id of the Cloud Run or Cloud Function.
+func (d *Detector) FaaSID() (string, error) {
+ return d.metadata.InstanceID()
+}
+
+// FaaSCloudRegion detects region from the metadata server.
+// It is in the format /projects//regions/.
+//
+// https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+func (d *Detector) FaaSCloudRegion() (string, error) {
+ region, err := d.metadata.Get(regionMetadataAttr)
+ if err != nil {
+ return "", err
+ }
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
new file mode 100644
index 0000000000000..37259fc451bdd
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
@@ -0,0 +1,75 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "fmt"
+ "strings"
+)
+
+// See the available GCE instance metadata:
+// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
+const machineTypeMetadataAttr = "instance/machine-type"
+
+func (d *Detector) onGCE() bool {
+ _, err := d.metadata.Get(machineTypeMetadataAttr)
+ return err == nil
+}
+
+// GCEHostType returns the machine type of the instance on which this program is running.
+func (d *Detector) GCEHostType() (string, error) {
+ return d.metadata.Get(machineTypeMetadataAttr)
+}
+
+// GCEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GCEHostID() (string, error) {
+ return d.metadata.InstanceID()
+}
+
+// GCEHostName returns the instance name of the instance on which this program is running.
+// Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which
+// value is returned.
+func (d *Detector) GCEHostName() (string, error) {
+ return d.metadata.InstanceName()
+}
+
+// GCEInstanceName returns the instance name of the instance on which this program is running.
+// This is the value visible in the Cloud Console UI, and the prefix for the default hostname
+// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func (d *Detector) GCEInstanceName() (string, error) {
+ return d.metadata.InstanceName()
+}
+
+// GCEInstanceHostname returns the full value of the default or custom hostname of the instance
+// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm.
+func (d *Detector) GCEInstanceHostname() (string, error) {
+ return d.metadata.Hostname()
+}
+
+// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
+ zone, err := d.metadata.Zone()
+ if err != nil {
+ return "", "", err
+ }
+ if zone == "" {
+ return "", "", fmt.Errorf("no zone detected from GCE metadata server")
+ }
+ splitZone := strings.SplitN(zone, "-", 3)
+ if len(splitZone) != 3 {
+ return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone)
+ }
+ return zone, strings.Join(splitZone[0:2], "-"), nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
new file mode 100644
index 0000000000000..67ed972b2326d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
@@ -0,0 +1,70 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ // If the kubernetes.default.svc service exists in the cluster,
+ // then the KUBERNETES_SERVICE_HOST env var will be populated.
+ // Use this as an indication that we are running on kubernetes.
+ k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST"
+ // See the available GKE metadata:
+ // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata
+ clusterNameMetadataAttr = "cluster-name"
+ clusterLocationMetadataAttr = "cluster-location"
+)
+
+func (d *Detector) onGKE() bool {
+ _, found := d.os.LookupEnv(k8sServiceHostEnv)
+ return found
+}
+
+// GKEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GKEHostID() (string, error) {
+ return d.GCEHostID()
+}
+
+// GKEClusterName returns the name if the GKE cluster in which this program is running.
+func (d *Detector) GKEClusterName() (string, error) {
+ return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr)
+}
+
+type LocationType int64
+
+const (
+ UndefinedLocation LocationType = iota
+ Zone
+ Region
+)
+
+// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional.
+func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) {
+ clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr)
+ if err != nil {
+ return "", UndefinedLocation, err
+ }
+ switch strings.Count(clusterLocation, "-") {
+ case 1:
+ return clusterLocation, Region, nil
+ case 2:
+ return clusterLocation, Zone, nil
+ default:
+ return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation)
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
new file mode 100644
index 0000000000000..c77d5eb154461
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
@@ -0,0 +1,37 @@
+# OpenTelemetry Google Cloud Monitoring Exporter
+
+[](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric)
+[![Apache License][license-image]][license-url]
+
+OpenTelemetry Google Cloud Monitoring Exporter allows the user to send collected metrics to Google Cloud.
+
+[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more.
+
+## Setup
+
+Google Cloud Monitoring is a managed service provided by Google Cloud Platform. Google Cloud Monitoring requires you to set up a "Workspace" in advance. The guide to create a new Workspace is available on [the official document](https://cloud.google.com/monitoring/workspaces/create).
+
+## Authentication
+
+The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so the service account is automatically detected by default, but also the custom credential file (so called `service_account_key.json`) can be detected with specific conditions. Quoting from the document of `google.FindDefaultCredentials`:
+
+* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
+* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`.
+
+When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g. `GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.:
+
+```golang
+projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
+opts := []mexporter.Option{
+ mexporter.WithProjectID(projectID),
+}
+```
+
+## Useful links
+
+* For more information on OpenTelemetry, visit: https://opentelemetry.io/
+* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go
+* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring
+
+[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE
+[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
new file mode 100644
index 0000000000000..90dfcb344e329
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
@@ -0,0 +1,49 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "golang.org/x/oauth2/google"
+)
+
+// New creates a new Exporter that implements metric.Exporter.
+func New(opts ...Option) (sdkmetric.Exporter, error) {
+ o := options{
+ context: context.Background(),
+ resourceAttributeFilter: DefaultResourceAttributesFilter,
+ }
+ for _, opt := range opts {
+ opt(&o)
+ }
+
+ if o.projectID == "" {
+ creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("google cloud monitoring: no project found with application default credentials")
+ }
+ o.projectID = creds.ProjectID
+ }
+ return newMetricExporter(&o)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
new file mode 100644
index 0000000000000..57329a4bdc30e
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
@@ -0,0 +1,97 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// TODO: remove this file when the constants are ready in the Go SDK
+
+// Mappings for the well-known OpenTelemetry resource label keys
+// to applicable Monitored Resource label keys.
+// A uniquely identifying name for the Kubernetes cluster. Kubernetes
+// does not have cluster names as an internal concept so this may be
+// set to any meaningful value within the environment. For example,
+// GKE clusters have a name which can be used for this label.
+const (
+ // Deprecated: use semconv.CloudProviderKey instead.
+ CloudKeyProvider = "cloud.provider"
+ // Deprecated: use semconv.CloudAccountIDKey instead.
+ CloudKeyAccountID = "cloud.account.id"
+ // Deprecated: use semconv.CloudRegionKey instead.
+ CloudKeyRegion = "cloud.region"
+ // Deprecated: use semconv.CloudAvailabilityZoneKey instead.
+ CloudKeyZone = "cloud.availability_zone"
+
+ // Deprecated: use semconv.ServiceNamespaceKey instead.
+ ServiceKeyNamespace = "service.namespace"
+ // Deprecated: use semconv.ServiceInstanceIDKey instead.
+ ServiceKeyInstanceID = "service.instance.id"
+ // Deprecated: use semconv.ServiceNameKey instead.
+ ServiceKeyName = "service.name"
+
+ // Deprecated: HostType is not needed.
+ HostType = "host"
+ // A uniquely identifying name for the host.
+ // Deprecated: use semconv.HostNameKey instead.
+ HostKeyName = "host.name"
+ // A hostname as returned by the 'hostname' command on host machine.
+ // Deprecated: HostKeyHostName is not needed.
+ HostKeyHostName = "host.hostname"
+ // Deprecated: use semconv.HostIDKey instead.
+ HostKeyID = "host.id"
+ // Deprecated: use semconv.HostTypeKey instead.
+ HostKeyType = "host.type"
+
+ // A uniquely identifying name for the Container.
+ // Deprecated: use semconv.ContainerNameKey instead.
+ ContainerKeyName = "container.name"
+ // Deprecated: use semconv.ContainerImageNameKey instead.
+ ContainerKeyImageName = "container.image.name"
+ // Deprecated: use semconv.ContainerImageTagKey instead.
+ ContainerKeyImageTag = "container.image.tag"
+
+ // Cloud Providers
+ // Deprecated: use semconv.CloudProviderAWS instead.
+ CloudProviderAWS = "aws"
+ // Deprecated: use semconv.CloudProviderGCP instead.
+ CloudProviderGCP = "gcp"
+ // Deprecated: use semconv.CloudProviderAzure instead.
+ CloudProviderAZURE = "azure"
+
+ // Deprecated: Use "k8s" instead. This should not be needed.
+ K8S = "k8s"
+ // Deprecated: use semconv.K8SClusterNameKey instead.
+ K8SKeyClusterName = "k8s.cluster.name"
+ // Deprecated: use semconv.K8SNamespaceNameKey instead.
+ K8SKeyNamespaceName = "k8s.namespace.name"
+ // Deprecated: use semconv.K8SPodNameKey instead.
+ K8SKeyPodName = "k8s.pod.name"
+ // Deprecated: use semconv.K8SDeploymentNameKey instead.
+ K8SKeyDeploymentName = "k8s.deployment.name"
+
+ // Monitored Resources types
+ // Deprecated: Use "k8s_container" instead.
+ K8SContainer = "k8s_container"
+ // Deprecated: Use "k8s_node" instead.
+ K8SNode = "k8s_node"
+ // Deprecated: Use "k8s_pod" instead.
+ K8SPod = "k8s_pod"
+ // Deprecated: Use "k8s_cluster" instead.
+ K8SCluster = "k8s_cluster"
+ // Deprecated: Use "gce_instance" instead.
+ GCEInstance = "gce_instance"
+ // Deprecated: Use "aws_ec2_instance" instead.
+ AWSEC2Instance = "aws_ec2_instance"
+ // Deprecated: Use "generic_task" instead.
+ GenericTask = "generic_task"
+)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
new file mode 100644
index 0000000000000..974c0af950873
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
@@ -0,0 +1,32 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+type errUnexpectedAggregationKind struct {
+ kind string
+}
+
+func (e errUnexpectedAggregationKind) Error() string {
+ return fmt.Sprintf("the metric kind is unexpected: %v", e.kind)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
new file mode 100644
index 0000000000000..ba0012e25a916
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
@@ -0,0 +1,890 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/option"
+ "google.golang.org/genproto/googleapis/api/distribution"
+ "google.golang.org/genproto/googleapis/api/label"
+ googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping"
+)
+
+const (
+	// The number of time series to send to GCM in a single request. This
+ // is a hard limit in the GCM API, so we never want to exceed 200.
+ sendBatchSize = 200
+
+ cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s"
+ platformMappingMonitoredResourceKey = "gcp.resource_type"
+)
+
+// key is used to judge the uniqueness of the record descriptor.
+type key struct {
+ name string
+ libraryname string
+}
+
+func keyOf(metrics metricdata.Metrics, library instrumentation.Library) key {
+ return key{
+ name: metrics.Name,
+ libraryname: library.Name,
+ }
+}
+
+// metricExporter is the implementation of OpenTelemetry metric exporter for
+// Google Cloud Monitoring.
+type metricExporter struct {
+ o *options
+ shutdown chan struct{}
+ // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD.
+ mdCache map[key]*googlemetricpb.MetricDescriptor
+ client *monitoring.MetricClient
+ mdLock sync.RWMutex
+ shutdownOnce sync.Once
+}
+
+// ForceFlush does nothing, the exporter holds no state.
+func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() }
+
+// Shutdown shuts down the client connections.
+func (e *metricExporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ close(e.shutdown)
+ err = errors.Join(ctx.Err(), e.client.Close())
+ })
+ return err
+}
+
+// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring.
+func newMetricExporter(o *options) (*metricExporter, error) {
+ if strings.TrimSpace(o.projectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...)
+ ctx := o.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ client, err := monitoring.NewMetricClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if o.compression == "gzip" {
+ client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ }
+
+ cache := map[key]*googlemetricpb.MetricDescriptor{}
+ e := &metricExporter{
+ o: o,
+ mdCache: cache,
+ client: client,
+ shutdown: make(chan struct{}),
+ }
+ return e, nil
+}
+
+var errShutdown = fmt.Errorf("exporter is shutdown")
+
+// Export exports OpenTelemetry Metrics to Google Cloud Monitoring.
+func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ select {
+ case <-me.shutdown:
+ return errShutdown
+ default:
+ }
+
+ if me.o.destinationProjectQuota {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")}))
+ }
+ return errors.Join(
+ me.exportMetricDescriptor(ctx, rm),
+ me.exportTimeSeries(ctx, rm),
+ )
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality {
+ return metric.DefaultTemporalitySelector(ik)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation {
+ return metric.DefaultAggregationSelector(ik)
+}
+
+// exportMetricDescriptor create MetricDescriptor from the record
+// if the descriptor is not registered in Cloud Monitoring yet.
+func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ // We only send metric descriptors if we're configured *and* we're not sending service timeseries.
+ if me.o.disableCreateMetricDescriptors {
+ return nil
+ }
+
+ me.mdLock.Lock()
+ defer me.mdLock.Unlock()
+ mds := make(map[key]*googlemetricpb.MetricDescriptor)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ k := keyOf(metrics, scope.Scope)
+
+ if _, ok := me.mdCache[k]; ok {
+ continue
+ }
+
+ if _, localok := mds[k]; !localok {
+ md := me.recordToMdpb(metrics, extraLabels)
+ mds[k] = md
+ }
+ }
+ }
+
+ // TODO: This process is synchronous and blocks longer time if records in cps
+ // have many different descriptors. In the cps.ForEach above, it should spawn
+ // goroutines to send CreateMetricDescriptorRequest asynchronously in the case
+ // the descriptor does not exist in global cache (me.mdCache).
+ // See details in #26.
+ var errs []error
+ for kmd, md := range mds {
+ err := me.createMetricDescriptorIfNeeded(ctx, md)
+ if err == nil {
+ me.mdCache[kmd] = md
+ }
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error {
+ mdReq := &monitoringpb.GetMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type),
+ }
+ _, err := me.client.GetMetricDescriptor(ctx, mdReq)
+ if err == nil {
+ // If the metric descriptor already exists, skip the CreateMetricDescriptor call.
+ // Metric descriptors cannot be updated without deleting them first, so there
+ // isn't anything we can do here:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify
+ return nil
+ }
+ req := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", me.o.projectID),
+ MetricDescriptor: md,
+ }
+ _, err = me.client.CreateMetricDescriptor(ctx, req)
+ return err
+}
+
+// exportTimeSeries create TimeSeries from the records in cps.
+// res should be the common resource among all TimeSeries, such as instance id, application name and so on.
+func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ tss, err := me.recordsToTspbs(rm)
+ if len(tss) == 0 {
+ return err
+ }
+
+ name := fmt.Sprintf("projects/%s", me.o.projectID)
+
+ errs := []error{err}
+ for i := 0; i < len(tss); i += sendBatchSize {
+ j := i + sendBatchSize
+ if j >= len(tss) {
+ j = len(tss)
+ }
+
+ // TODO: When this exporter is rewritten, support writing to multiple
+ // projects based on the "gcp.project.id" resource.
+ req := &monitoringpb.CreateTimeSeriesRequest{
+ Name: name,
+ TimeSeries: tss[i:j],
+ }
+ if me.o.createServiceTimeSeries {
+ errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req))
+ } else {
+ errs = append(errs, me.client.CreateTimeSeries(ctx, req))
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set {
+ set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter)
+ return &set
+}
+
+// descToMetricType converts descriptor to MetricType proto type.
+// Basically this returns default value ("workload.googleapis.com/[metric type]").
+func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string {
+ if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil {
+ return formatter(desc)
+ }
+ return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name)
+}
+
+// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name.
+func metricTypeToDisplayName(mURL string) string {
+ // strip domain, keep path after domain.
+ u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL))
+ if err != nil || u.Path == "" {
+ return mURL
+ }
+ return strings.TrimLeft(u.Path, "/")
+}
+
+// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor.
+func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor {
+ name := metrics.Name
+ typ := me.descToMetricType(metrics)
+ kind, valueType := recordToMdpbKindType(metrics.Data)
+
+ // Detailed explanations on MetricDescriptor proto is not documented on
+ // generated Go packages. Refer to the original proto file.
+ // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33
+ return &googlemetricpb.MetricDescriptor{
+ Name: name,
+ DisplayName: metricTypeToDisplayName(typ),
+ Type: typ,
+ MetricKind: kind,
+ ValueType: valueType,
+ Unit: string(metrics.Unit),
+ Description: metrics.Description,
+ Labels: labelDescriptors(metrics, extraLabels),
+ }
+}
+
+func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor {
+ labels := []*label.LabelDescriptor{}
+ seenKeys := map[string]struct{}{}
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ // Skip keys that have already been set
+ if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok {
+ continue
+ }
+ labels = append(labels, &label.LabelDescriptor{
+ Key: normalizeLabelKey(string(kv.Key)),
+ })
+ seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{}
+ }
+ }
+ addAttributes(extraLabels)
+ switch a := metrics.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Gauge[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ }
+ return labels
+}
+
+type attributes struct {
+ attrs attribute.Set
+}
+
+func (attrs *attributes) GetString(key string) (string, bool) {
+ value, ok := attrs.attrs.Value(attribute.Key(key))
+ return value.AsString(), ok
+}
+
+// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource
+// proto type for Cloud Monitoring.
+//
+// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors
+func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource {
+ platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey)
+
+ // check if platform mapping is requested and possible
+ if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType {
+ // assemble attributes required to construct this MR
+ attributeMap := make(map[string]string)
+ for expectedLabel := range me.o.monitoredResourceDescription.mrLabels {
+ value, found := res.Set().Value(attribute.Key(expectedLabel))
+ if found {
+ attributeMap[expectedLabel] = value.AsString()
+ }
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: platformMrType.AsString(),
+ Labels: attributeMap,
+ }
+ }
+
+ gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{
+ attrs: attribute.NewSet(res.Attributes()...),
+ })
+ newLabels := make(map[string]string, len(gmr.Labels))
+ for k, v := range gmr.Labels {
+ newLabels[k] = sanitizeUTF8(v)
+ }
+ mr := &monitoredrespb.MonitoredResource{
+ Type: gmr.Type,
+ Labels: newLabels,
+ }
+ return mr
+}
+
+// recordToMdpbKindType return the mapping from OTel's record descriptor to
+// Cloud Monitoring's MetricKind and ValueType.
+func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+ switch agg := a.(type) {
+ case metricdata.Gauge[int64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Gauge[float64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Sum[int64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Sum[float64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+ default:
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+}
+
+// recordToMpb converts data from records to Metric proto type for Cloud Monitoring.
+func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Library, extraLabels *attribute.Set) *googlemetricpb.Metric {
+ me.mdLock.RLock()
+ defer me.mdLock.RUnlock()
+ k := keyOf(metrics, library)
+ md, ok := me.mdCache[k]
+ if !ok {
+ md = me.recordToMdpb(metrics, extraLabels)
+ }
+
+ labels := make(map[string]string)
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit())
+ }
+ }
+ addAttributes(extraLabels)
+ addAttributes(&attributes)
+
+ return &googlemetricpb.Metric{
+ Type: md.Type,
+ Labels: labels,
+ }
+}
+
+// recordToTspb converts record to TimeSeries proto type with common resource.
+// ref. https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
+func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) {
+ var tss []*monitoringpb.TimeSeries
+ var errs []error
+ if m.Data == nil {
+ return nil, nil
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[int64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Gauge[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[float64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[int64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[int64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[int64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[float64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[float64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[float64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ default:
+ errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()})
+ }
+ return tss, errors.Join(errs...)
+}
+
+func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) {
+ mr := me.resourceToMonitoredResourcepb(rm.Resource)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+
+ var (
+ tss []*monitoringpb.TimeSeries
+ errs []error
+ )
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels)
+ errs = append(errs, err)
+ tss = append(tss, ts...)
+ }
+ }
+
+ return tss, errors.Join(errs...)
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
+
+func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ value, valueType := numberDataPointToValue(point)
+ timestamp := timestamppb.New(point.Time)
+ if err := timestamp.CheckValid(); err != nil {
+ return nil, err
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_GAUGE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: timestamp,
+ },
+ Value: value,
+ }},
+ }, nil
+}
+
+func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ value, valueType := numberDataPointToValue[N](point)
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: value,
+ }},
+ }, nil
+}
+
+// TODO(@dashpole): Refactor to pass control-coupling lint check.
+//
+//nolint:revive
+func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := histToDistribution(point, projectID)
+ if enableSOSD {
+ setSumOfSquaredDeviation(point, distributionValue)
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := expHistToDistribution(point, projectID)
+ // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality.
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) {
+ // The end time of a new interval must be at least a millisecond after the end time of the
+ // previous interval, for all non-gauge types.
+ // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval
+ if end.Sub(start).Milliseconds() <= 1 {
+ end = start.Add(time.Millisecond)
+ }
+ startpb := timestamppb.New(start)
+ endpb := timestamppb.New(end)
+ err := errors.Join(
+ startpb.CheckValid(),
+ endpb.CheckValid(),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &monitoringpb.TimeInterval{
+ StartTime: startpb,
+ EndTime: endpb,
+ }, nil
+}
+
+func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution {
+ counts := make([]int64, len(hist.BucketCounts))
+ for i, v := range hist.BucketCounts {
+ counts[i] = int64(v)
+ }
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: &distribution.Distribution_BucketOptions{
+ Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: hist.Bounds,
+ },
+ },
+ },
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution {
+ // First calculate underflow bucket with all negatives + zeros.
+ underflow := hist.ZeroCount
+ negativeBuckets := hist.NegativeBucket.Counts
+ for i := 0; i < len(negativeBuckets); i++ {
+ underflow += negativeBuckets[i]
+ }
+
+ // Next, pull in remaining buckets.
+ counts := make([]int64, len(hist.PositiveBucket.Counts)+2)
+ bucketOptions := &distribution.Distribution_BucketOptions{}
+ counts[0] = int64(underflow)
+ positiveBuckets := hist.PositiveBucket.Counts
+ for i := 0; i < len(positiveBuckets); i++ {
+ counts[i+1] = int64(positiveBuckets[i])
+ }
+ // Overflow bucket is always empty
+ counts[len(counts)-1] = 0
+
+ if len(hist.PositiveBucket.Counts) == 0 {
+ // We cannot send exponential distributions with no positive buckets,
+ // instead we send a simple overflow/underflow histogram.
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: []float64{0},
+ },
+ }
+ } else {
+ // Exponential histogram
+ growth := math.Exp2(math.Exp2(-float64(hist.Scale)))
+ scale := math.Pow(growth, float64(hist.PositiveBucket.Offset))
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{
+ ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{
+ GrowthFactor: growth,
+ Scale: scale,
+ NumFiniteBuckets: int32(len(counts) - 2),
+ },
+ }
+ }
+
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: bucketOptions,
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar {
+ var exemplars []*distribution.Distribution_Exemplar
+ for _, e := range Exemplars {
+ attachments := []*anypb.Any{}
+ if hasValidSpanContext(e) {
+ sctx, err := anypb.New(&monitoringpb.SpanContext{
+ SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])),
+ })
+ if err == nil {
+ attachments = append(attachments, sctx)
+ }
+ }
+ if len(e.FilteredAttributes) > 0 {
+ attr, err := anypb.New(&monitoringpb.DroppedLabels{
+ Label: attributesToLabels(e.FilteredAttributes),
+ })
+ if err == nil {
+ attachments = append(attachments, attr)
+ }
+ }
+ exemplars = append(exemplars, &distribution.Distribution_Exemplar{
+ Value: float64(e.Value),
+ Timestamp: timestamppb.New(e.Time),
+ Attachments: attachments,
+ })
+ }
+ sort.Slice(exemplars, func(i, j int) bool {
+ return exemplars[i].Value < exemplars[j].Value
+ })
+ return exemplars
+}
+
+func attributesToLabels(attrs []attribute.KeyValue) map[string]string {
+ labels := make(map[string]string, len(attrs))
+ for _, attr := range attrs {
+ labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit())
+ }
+ return labels
+}
+
+var (
+ nilTraceID trace.TraceID
+ nilSpanID trace.SpanID
+)
+
+func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool {
+ return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:])
+}
+
+func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) {
+ var prevBound float64
+ // Calculate the sum of squared deviation.
+ for i := 0; i < len(hist.Bounds); i++ {
+ // Assume all points in the bucket occur at the middle of the bucket range
+ middleOfBucket := (prevBound + hist.Bounds[i]) / 2
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean)
+ prevBound = hist.Bounds[i]
+ }
+ // The infinity bucket is an implicit +Inf bound after the list of explicit bounds.
+ // Assume points in the infinity bucket are at the top of the previous bucket
+ middleOfInfBucket := prevBound
+ if len(dist.BucketCounts) > 0 {
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean)
+ }
+}
+
+func numberDataPointToValue[N int64 | float64](
+ point metricdata.DataPoint[N],
+) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) {
+ switch v := any(point.Value).(type) {
+ case int64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v,
+ }},
+ googlemetricpb.MetricDescriptor_INT64
+ case float64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v,
+ }},
+ googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ // It is impossible to reach this statement
+ return nil, googlemetricpb.MetricDescriptor_INT64
+}
+
+// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149
+//
+// > The label key name must follow:
+// >
+// > * Only upper and lower-case letters, digits and underscores (_) are
+// > allowed.
+// > * Label name must start with a letter or digit.
+// > * The maximum length of a label name is 100 characters.
+//
+// Note: this does not truncate if a label is too long.
+func normalizeLabelKey(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ s = strings.Map(sanitizeRune, s)
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ return s
+}
+
+// converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
new file mode 100644
index 0000000000000..11b96067d557d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
@@ -0,0 +1,201 @@
+// Copyright 2020-2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+ apioption "google.golang.org/api/option"
+)
+
+var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version())
+
+// MonitoredResourceDescription is the struct which holds information required to map OTel resource to specific
+// Google Cloud MonitoredResource.
+type MonitoredResourceDescription struct {
+ mrLabels map[string]struct{}
+ mrType string
+}
+
+// Option is function type that is passed to the exporter initialization function.
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+ // context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Cloud Monitoring
+ // clients, and then every time a new batch of metrics needs to be uploaded.
+ //
+ // If unset, context.Background() will be used.
+ context context.Context
+ // metricDescriptorTypeFormatter is the custom formatter for the MetricDescriptor.Type.
+ // By default, the format string is "workload.googleapis.com/[metric name]".
+ metricDescriptorTypeFormatter func(metricdata.Metrics) string
+ // resourceAttributeFilter determines which resource attributes to
+ // add to metrics as metric labels. By default, it adds service.name,
+ // service.namespace, and service.instance.id.
+ resourceAttributeFilter attribute.Filter
+ // monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+ // Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+ // monitored resource type.
+ monitoredResourceDescription MonitoredResourceDescription
+ // projectID is the identifier of the Cloud Monitoring
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+ //
+ // It will be used in the project_id label of a Google Cloud Monitoring monitored
+ // resource if the resource does not inherently belong to a specific
+ // project, e.g. on-premise resource like k8s_container or generic_task.
+ projectID string
+ // compression enables gzip compression on gRPC calls.
+ compression string
+ // monitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ monitoringClientOptions []apioption.ClientOption
+ // destinationProjectQuota sets whether the request should use quota from
+ // the destination project for the request.
+ destinationProjectQuota bool
+
+ // disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+ disableCreateMetricDescriptors bool
+
+ // enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+ // deviation. It isn't correct, so we don't send it by default.
+ enableSumOfSquaredDeviation bool
+
+ // createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+ // Implicitly, this sets `disableCreateMetricDescriptors` to true.
+ createServiceTimeSeries bool
+}
+
+// WithProjectID sets Google Cloud Platform project as projectID.
+// Without using this option, it automatically detects the project ID
+// from the default credential detection process.
+ // Please find the detailed order of the default credential detection process in the doc:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+ return func(o *options) {
+ o.projectID = id
+ }
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+ return func(o *options) {
+ o.destinationProjectQuota = true
+ }
+}
+
+// WithMonitoringClientOptions add the options for Cloud Monitoring client instance.
+ // Available options are defined in the google.golang.org/api/option package.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+ return func(o *options) {
+ o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+ }
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+ return func(o *options) {
+ o.metricDescriptorTypeFormatter = f
+ }
+}
+
+ // WithFilteredResourceAttributes determines which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+ return func(o *options) {
+ o.resourceAttributeFilter = filter
+ }
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+ return (kv.Key == semconv.ServiceNameKey ||
+ kv.Key == semconv.ServiceNamespaceKey ||
+ kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+ return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+ return func(o *options) {
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+ return func(o *options) {
+ o.compression = c
+ }
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+ return func(o *options) {
+ o.enableSumOfSquaredDeviation = true
+ }
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+ return func(o *options) {
+ o.createServiceTimeSeries = true
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+ // Google MonitoredResource. The provided mrLabels are searched for in the OpenTelemetry Resource Attributes and, if
+ // found, are included in the MonitoredResource labels.
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) {
+ return func(o *options) {
+ mrLabelSet := make(map[string]struct{})
+ for _, label := range mrLabels {
+ mrLabelSet[label] = struct{}{}
+ }
+ o.monitoredResourceDescription = MonitoredResourceDescription{
+ mrType: mrType,
+ mrLabels: mrLabelSet,
+ }
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
new file mode 100644
index 0000000000000..e31119fc1293f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
@@ -0,0 +1,21 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// Version is the current release version of the OpenTelemetry
+// Operations Metric Exporter in use.
+func Version() string {
+ return "0.48.1"
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
new file mode 100644
index 0000000000000..4b5af517fe62d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
@@ -0,0 +1,286 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resourcemapping
+
+import (
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+const (
+ ProjectIDAttributeKey = "gcp.project.id"
+
+ awsAccount = "aws_account"
+ awsEc2Instance = "aws_ec2_instance"
+ clusterName = "cluster_name"
+ containerName = "container_name"
+ gceInstance = "gce_instance"
+ genericNode = "generic_node"
+ genericTask = "generic_task"
+ instanceID = "instance_id"
+ job = "job"
+ k8sCluster = "k8s_cluster"
+ k8sContainer = "k8s_container"
+ k8sNode = "k8s_node"
+ k8sPod = "k8s_pod"
+ location = "location"
+ namespace = "namespace"
+ namespaceName = "namespace_name"
+ nodeID = "node_id"
+ nodeName = "node_name"
+ podName = "pod_name"
+ region = "region"
+ taskID = "task_id"
+ zone = "zone"
+ gaeInstance = "gae_instance"
+ gaeApp = "gae_app"
+ gaeModuleID = "module_id"
+ gaeVersionID = "version_id"
+ cloudRunRevision = "cloud_run_revision"
+ cloudFunction = "cloud_function"
+ cloudFunctionName = "function_name"
+ serviceName = "service_name"
+ configurationName = "configuration_name"
+ revisionName = "revision_name"
+ bmsInstance = "baremetalsolution.googleapis.com/Instance"
+ unknownServicePrefix = "unknown_service"
+)
+
+var (
+ // monitoredResourceMappings contains mappings of GCM resource label keys onto mapping config from OTel
+ // resource for a given monitored resource type.
+ monitoredResourceMappings = map[string]map[string]struct {
+ // If none of the otelKeys are present in the Resource, fallback to this literal value
+ fallbackLiteral string
+ // OTel resource keys to try and populate the resource label from. For entries with
+ // multiple OTel resource keys, the keys' values will be coalesced in order until there
+ // is a non-empty value.
+ otelKeys []string
+ }{
+ gceInstance: {
+ zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ k8sContainer: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}},
+ },
+ k8sPod: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ },
+ k8sNode: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}},
+ },
+ k8sCluster: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ },
+ gaeInstance: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}},
+ },
+ gaeApp: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ },
+ awsEc2Instance: {
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ region: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ },
+ awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}},
+ },
+ bmsInstance: {
+ location: {otelKeys: []string{string(semconv.CloudRegionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ genericTask: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}},
+ taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}},
+ },
+ genericNode: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}},
+ },
+ }
+)
+
+// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK.
+type ReadOnlyAttributes interface {
+ GetString(string) (string, bool)
+}
+
+// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Logging.
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeApp, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Monitoring
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeInstance, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPComputeEngine.Value.AsString():
+ return createMonitoredResource(gceInstance, attrs)
+ case semconv.CloudPlatformAWSEC2.Value.AsString():
+ return createMonitoredResource(awsEc2Instance, attrs)
+ // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution
+ // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way
+ // into the semconv module.
+ case "gcp_bare_metal_solution":
+ return createMonitoredResource(bmsInstance, attrs)
+ default:
+ // if k8s.cluster.name is set, pattern match for various k8s resources.
+ // this will also match non-cloud k8s platforms like minikube.
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok {
+ // Try for most to least specific k8s_container, k8s_pod, etc
+ if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok {
+ return createMonitoredResource(k8sContainer, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok {
+ return createMonitoredResource(k8sPod, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok {
+ return createMonitoredResource(k8sNode, attrs)
+ } else {
+ return createMonitoredResource(k8sCluster, attrs)
+ }
+ }
+
+ // Fallback to generic_task
+ _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey))
+ _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey))
+ _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey))
+ _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey))
+ if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) {
+ return createMonitoredResource(genericTask, attrs)
+ }
+
+ // Everything else fallback to generic_node
+ return createMonitoredResource(genericNode, attrs)
+ }
+}
+
+func createMonitoredResource(
+ monitoredResourceType string,
+ resourceAttrs ReadOnlyAttributes,
+) *monitoredrespb.MonitoredResource {
+ mappings := monitoredResourceMappings[monitoredResourceType]
+ mrLabels := make(map[string]string, len(mappings))
+
+ for mrKey, mappingConfig := range mappings {
+ mrValue := ""
+ ok := false
+ // Coalesce the possible keys in order
+ for _, otelKey := range mappingConfig.otelKeys {
+ mrValue, ok = resourceAttrs.GetString(otelKey)
+ if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) {
+ break
+ }
+ }
+ if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) {
+ // the service name started with unknown_service, and was ignored above
+ mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey))
+ }
+ if !ok || mrValue == "" {
+ mrValue = mappingConfig.fallbackLiteral
+ }
+ mrLabels[mrKey] = sanitizeUTF8(mrValue)
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: monitoredResourceType,
+ Labels: mrLabels,
+ }
+}
+
+func contains(list []string, element string) bool {
+ for _, item := range list {
+ if item == element {
+ return true
+ }
+ }
+ return false
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
index 0b702e5d0612e..7299227a3d952 100644
--- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
@@ -7,7 +7,6 @@
package v3
import (
- _ "github.com/cncf/xds/go/xds/annotations/v3"
v3 "github.com/cncf/xds/go/xds/type/v3"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -84,26 +83,23 @@ var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{
0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f,
- 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
- 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65,
- 0x78, 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x60, 0xd2, 0xc6, 0xa4, 0xe1,
- 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
- 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
- 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78,
+ 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
index f53a4ee947829..4393bb7e29201 100644
--- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
@@ -7,7 +7,6 @@
package v3
import (
- _ "github.com/cncf/xds/go/xds/annotations/v3"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -65,18 +64,16 @@ var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{
0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74,
0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
- 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64,
- 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33,
- 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1d, 0x0a,
- 0x1b, 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
- 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x67, 0xd2, 0xc6,
- 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75,
- 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
- 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65,
+ 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48,
+ 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
index f88e67e681fc0..d94b03b55958f 100644
--- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
@@ -7,7 +7,6 @@
package v3
import (
- _ "github.com/cncf/xds/go/xds/annotations/v3"
v3 "github.com/cncf/xds/go/xds/core/v3"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -698,149 +697,146 @@ var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{
0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63,
- 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69,
- 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x22, 0x80, 0x10, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a,
- 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52,
- 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f,
- 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a,
- 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
- 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
- 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
- 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73,
- 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08,
- 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x12, 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
- 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53,
- 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00,
- 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
- 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e,
- 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12,
+ 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12,
+ 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e,
0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
- 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72,
- 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61,
- 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32,
- 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x1a, 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
- 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01,
- 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78,
- 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
- 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46,
- 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
- 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
- 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
- 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08,
- 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
- 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08,
- 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
- 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05,
- 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9,
- 0x04, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41,
- 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
- 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65,
- 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75,
- 0x74, 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
- 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
- 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52,
- 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d,
- 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d,
- 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69,
+ 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52,
+ 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72,
- 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70,
- 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a,
- 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69,
+ 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65,
+ 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e,
+ 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a,
+ 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a,
+ 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d,
- 0x61, 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x3a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d,
- 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05,
- 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61,
- 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a,
+ 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04,
+ 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a,
+ 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f,
+ 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54,
+ 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d,
+ 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, 0x0a,
+ 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61,
+ 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x08, 0xd2, 0xc6, 0xa4, 0xe1,
- 0x06, 0x02, 0x08, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f,
- 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78,
- 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f,
- 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65,
+ 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c,
+ 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a,
+ 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore
new file mode 100644
index 0000000000000..b25c15b81fae0
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/ebitengine/purego/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE
new file mode 100644
index 0000000000000..8dada3edaf50d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md
new file mode 100644
index 0000000000000..f1ff9053aceeb
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/README.md
@@ -0,0 +1,97 @@
+# purego
+[](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin)
+
+A library for calling C functions from Go without Cgo.
+
+> This is beta software so expect bugs and potentially API breaking changes
+> but each release will be tagged to avoid breaking people's code.
+> Bug reports are encouraged.
+
+## Motivation
+
+The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled
+cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was
+born to bring that same vision to the other platforms supported by Ebitengine.
+
+## Benefits
+
+- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler.
+- **Faster Compilation**: Efficiently cache your entirely Go builds.
+- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't!
+- **Dynamic Linking**: Load symbols at runtime and use it as a plugin system.
+- **Foreign Function Interface**: Call into other languages that are compiled into shared objects.
+- **Cgo Fallback**: Works even with CGO_ENABLED=1 so incremental porting is possible.
+This also means unsupported GOARCHs (freebsd/riscv64, linux/mips, etc.) will still work
+except for float arguments and return values.
+
+## Supported Platforms
+
+- **FreeBSD**: amd64, arm64
+- **Linux**: amd64, arm64
+- **macOS / iOS**: amd64, arm64
+- **Windows**: 386*, amd64, arm*, arm64
+
+`*` These architectures only support SyscallN and NewCallback
+
+## Example
+
+The example below only showcases purego use for macOS and Linux. The other platforms require special handling which can
+be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows and FreeBSD.
+
+```go
+package main
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/ebitengine/purego"
+)
+
+func getSystemLibrary() string {
+ switch runtime.GOOS {
+ case "darwin":
+ return "/usr/lib/libSystem.B.dylib"
+ case "linux":
+ return "libc.so.6"
+ default:
+ panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS))
+ }
+}
+
+func main() {
+ libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL)
+ if err != nil {
+ panic(err)
+ }
+ var puts func(string)
+ purego.RegisterLibFunc(&puts, libc, "puts")
+ puts("Calling C from Go without Cgo!")
+}
+```
+
+Then to run: `CGO_ENABLED=0 go run main.go`
+
+## Questions
+
+If you have questions about how to incorporate purego in your project or want to discuss
+how it works join the [Discord](https://discord.gg/HzGZVD6BkY)!
+
+### External Code
+
+Purego uses code that originates from the Go runtime. These files are under the BSD-3
+License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE).
+This is a list of the copied files:
+
+* `abi_*.h` from package `runtime/cgo`
+* `zcallback_darwin_*.s` from package `runtime`
+* `internal/fakecgo/abi_*.h` from package `runtime/cgo`
+* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo`
+* `internal/fakecgo/callbacks.go` from package `runtime/cgo`
+* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo`
+* `internal/fakecgo/iscgo.go` from package `runtime/cgo`
+* `internal/fakecgo/setenv.go` from package `runtime/cgo`
+* `internal/fakecgo/freebsd.go` from package `runtime/cgo`
+
+The files `abi_*.h` and `internal/fakecgo/abi_*.h` are the same because Bazel does not support cross-package use of
+`#include` so we need each one once per package. (cf. [issue](https://github.com/bazelbuild/rules_go/issues/3636))
diff --git a/vendor/github.com/ebitengine/purego/abi_amd64.h b/vendor/github.com/ebitengine/purego/abi_amd64.h
new file mode 100644
index 0000000000000..9949435fe9e0a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These save the frame pointer, so in general, functions that use
+// these should have zero frame size to suppress the automatic frame
+// pointer, though it's harmless to not do this.
+
+#ifdef GOOS_windows
+
+// REGS_HOST_TO_ABI0_STACK is the stack bytes used by
+// PUSH_REGS_HOST_TO_ABI0.
+#define REGS_HOST_TO_ABI0_STACK (28*8 + 8)
+
+// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from
+// the host ABI to Go ABI0 code. It saves all registers that are
+// callee-save in the host ABI and caller-save in Go ABI0 and prepares
+// for entry to Go.
+//
+// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag.
+// Clear the DF flag for the Go ABI.
+// MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+#define PUSH_REGS_HOST_TO_ABI0() \
+ PUSHFQ \
+ CLD \
+ ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \
+ MOVQ DI, (0*0)(SP) \
+ MOVQ SI, (1*8)(SP) \
+ MOVQ BP, (2*8)(SP) \
+ MOVQ BX, (3*8)(SP) \
+ MOVQ R12, (4*8)(SP) \
+ MOVQ R13, (5*8)(SP) \
+ MOVQ R14, (6*8)(SP) \
+ MOVQ R15, (7*8)(SP) \
+ MOVUPS X6, (8*8)(SP) \
+ MOVUPS X7, (10*8)(SP) \
+ MOVUPS X8, (12*8)(SP) \
+ MOVUPS X9, (14*8)(SP) \
+ MOVUPS X10, (16*8)(SP) \
+ MOVUPS X11, (18*8)(SP) \
+ MOVUPS X12, (20*8)(SP) \
+ MOVUPS X13, (22*8)(SP) \
+ MOVUPS X14, (24*8)(SP) \
+ MOVUPS X15, (26*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0() \
+ MOVQ (0*0)(SP), DI \
+ MOVQ (1*8)(SP), SI \
+ MOVQ (2*8)(SP), BP \
+ MOVQ (3*8)(SP), BX \
+ MOVQ (4*8)(SP), R12 \
+ MOVQ (5*8)(SP), R13 \
+ MOVQ (6*8)(SP), R14 \
+ MOVQ (7*8)(SP), R15 \
+ MOVUPS (8*8)(SP), X6 \
+ MOVUPS (10*8)(SP), X7 \
+ MOVUPS (12*8)(SP), X8 \
+ MOVUPS (14*8)(SP), X9 \
+ MOVUPS (16*8)(SP), X10 \
+ MOVUPS (18*8)(SP), X11 \
+ MOVUPS (20*8)(SP), X12 \
+ MOVUPS (22*8)(SP), X13 \
+ MOVUPS (24*8)(SP), X14 \
+ MOVUPS (26*8)(SP), X15 \
+ ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \
+ POPFQ
+
+#else
+// SysV ABI
+
+#define REGS_HOST_TO_ABI0_STACK (6*8)
+
+// SysV MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+// Both SysV and Go require DF to be cleared, so that's already clear.
+// The SysV and Go frame pointer conventions are compatible.
+#define PUSH_REGS_HOST_TO_ABI0() \
+ ADJSP $(REGS_HOST_TO_ABI0_STACK) \
+ MOVQ BP, (5*8)(SP) \
+ LEAQ (5*8)(SP), BP \
+ MOVQ BX, (0*8)(SP) \
+ MOVQ R12, (1*8)(SP) \
+ MOVQ R13, (2*8)(SP) \
+ MOVQ R14, (3*8)(SP) \
+ MOVQ R15, (4*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0() \
+ MOVQ (0*8)(SP), BX \
+ MOVQ (1*8)(SP), R12 \
+ MOVQ (2*8)(SP), R13 \
+ MOVQ (3*8)(SP), R14 \
+ MOVQ (4*8)(SP), R15 \
+ MOVQ (5*8)(SP), BP \
+ ADJSP $-(REGS_HOST_TO_ABI0_STACK)
+
+#endif
diff --git a/vendor/github.com/ebitengine/purego/abi_arm64.h b/vendor/github.com/ebitengine/purego/abi_arm64.h
new file mode 100644
index 0000000000000..5d5061ec1dbf8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/abi_arm64.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These macros save and restore the callee-saved registers
+// from the stack, but they don't adjust stack pointer, so
+// the user should prepare stack space in advance.
+// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP).
+//
+// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP).
+//
+// R29 is not saved because Go will save and restore it.
+
+#define SAVE_R19_TO_R28(offset) \
+ STP (R19, R20), ((offset)+0*8)(RSP) \
+ STP (R21, R22), ((offset)+2*8)(RSP) \
+ STP (R23, R24), ((offset)+4*8)(RSP) \
+ STP (R25, R26), ((offset)+6*8)(RSP) \
+ STP (R27, g), ((offset)+8*8)(RSP)
+#define RESTORE_R19_TO_R28(offset) \
+ LDP ((offset)+0*8)(RSP), (R19, R20) \
+ LDP ((offset)+2*8)(RSP), (R21, R22) \
+ LDP ((offset)+4*8)(RSP), (R23, R24) \
+ LDP ((offset)+6*8)(RSP), (R25, R26) \
+ LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */
+#define SAVE_F8_TO_F15(offset) \
+ FSTPD (F8, F9), ((offset)+0*8)(RSP) \
+ FSTPD (F10, F11), ((offset)+2*8)(RSP) \
+ FSTPD (F12, F13), ((offset)+4*8)(RSP) \
+ FSTPD (F14, F15), ((offset)+6*8)(RSP)
+#define RESTORE_F8_TO_F15(offset) \
+ FLDPD ((offset)+0*8)(RSP), (F8, F9) \
+ FLDPD ((offset)+2*8)(RSP), (F10, F11) \
+ FLDPD ((offset)+4*8)(RSP), (F12, F13) \
+ FLDPD ((offset)+6*8)(RSP), (F14, F15)
diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go
new file mode 100644
index 0000000000000..7d5abef349983
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/cgo.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build cgo && (darwin || freebsd || linux)
+
+package purego
+
+// if CGO_ENABLED=1 import the Cgo runtime to ensure that it is set up properly.
+// This is required since some frameworks need TLS setup the C way which Go doesn't do.
+// We currently don't support ios in fakecgo mode so force Cgo or fail
+// Even if CGO_ENABLED=1 the Cgo runtime is not imported unless `import "C"` is used.
+// which will import this package automatically. Normally this isn't an issue since it
+// usually isn't possible to call into C without using that import. However, with purego
+// it is since we don't use `import "C"`!
+import (
+ _ "runtime/cgo"
+
+ _ "github.com/ebitengine/purego/internal/cgo"
+)
diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go
new file mode 100644
index 0000000000000..95cdfe16f2488
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlerror.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux
+
+package purego
+
+// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose.
+//
+// This type is not available on Windows as there is no counterpart to it on Windows.
+type Dlerror struct {
+ s string
+}
+
+func (e Dlerror) Error() string {
+ return e.s
+}
diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go
new file mode 100644
index 0000000000000..f70a24584d659
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build (darwin || freebsd || linux) && !android && !faketime
+
+package purego
+
+import (
+ "unsafe"
+)
+
+// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html
+
+var (
+ fnDlopen func(path string, mode int) uintptr
+ fnDlsym func(handle uintptr, name string) uintptr
+ fnDlerror func() string
+ fnDlclose func(handle uintptr) bool
+)
+
+func init() {
+ RegisterFunc(&fnDlopen, dlopenABI0)
+ RegisterFunc(&fnDlsym, dlsymABI0)
+ RegisterFunc(&fnDlerror, dlerrorABI0)
+ RegisterFunc(&fnDlclose, dlcloseABI0)
+}
+
+// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible
+// with the current process and has not already been loaded into the
+// current process, it is loaded and linked. After being linked, if it contains
+// any initializer functions, they are called, before Dlopen
+// returns. It returns a handle that can be used with Dlsym and Dlclose.
+// A second call to Dlopen with the same path will return the same handle, but the internal
+// reference count for the handle will be incremented. Therefore, all
+// Dlopen calls should be balanced with a Dlclose call.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx],
+// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead.
+func Dlopen(path string, mode int) (uintptr, error) {
+ u := fnDlopen(path, mode)
+ if u == 0 {
+ return 0, Dlerror{fnDlerror()}
+ }
+ return u, nil
+}
+
+// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name.
+// It returns the address where that symbol is loaded into memory. If the symbol is not found,
+// in the specified library or any of the libraries that were automatically loaded by Dlopen
+// when that library was loaded, Dlsym returns zero.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead.
+func Dlsym(handle uintptr, name string) (uintptr, error) {
+ u := fnDlsym(handle, name)
+ if u == 0 {
+ return 0, Dlerror{fnDlerror()}
+ }
+ return u, nil
+}
+
+// Dlclose decrements the reference count on the dynamic library handle.
+// If the reference count drops to zero and no other loaded libraries
+// use symbols in it, then the dynamic library is unloaded.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead.
+func Dlclose(handle uintptr) error {
+ if fnDlclose(handle) {
+ return Dlerror{fnDlerror()}
+ }
+ return nil
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+ return Dlsym(handle, name)
+}
+
+// these functions exist in dlfcn_stubs.s and are calling C functions linked to in dlfcn_GOOS.go
+// the indirection is necessary because a function is actually a pointer to the pointer to the code.
+// sadly, I do not know of anyway to remove the assembly stubs entirely because //go:linkname doesn't
+// appear to work if you link directly to the C function on darwin arm64.
+
+//go:linkname dlopen dlopen
+var dlopen uintptr
+var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen))
+
+//go:linkname dlsym dlsym
+var dlsym uintptr
+var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym))
+
+//go:linkname dlclose dlclose
+var dlclose uintptr
+var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose))
+
+//go:linkname dlerror dlerror
+var dlerror uintptr
+var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror))
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go
new file mode 100644
index 0000000000000..0d5341764edef
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package purego
+
+import "github.com/ebitengine/purego/internal/cgo"
+
+// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h
+
+const (
+ is64bit = 1 << (^uintptr(0) >> 63) / 2
+ is32bit = 1 - is64bit
+ RTLD_DEFAULT = is32bit * 0xffffffff
+ RTLD_LAZY = 0x00000001
+ RTLD_NOW = is64bit * 0x00000002
+ RTLD_LOCAL = 0x00000000
+ RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002
+)
+
+func Dlopen(path string, mode int) (uintptr, error) {
+ return cgo.Dlopen(path, mode)
+}
+
+func Dlsym(handle uintptr, name string) (uintptr, error) {
+ return cgo.Dlsym(handle, name)
+}
+
+func Dlclose(handle uintptr) error {
+ return cgo.Dlclose(handle)
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+ return Dlsym(handle, name)
+}
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go
new file mode 100644
index 0000000000000..5f876278a3e66
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html
+
+const (
+ RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol
+ RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time.
+ RTLD_NOW = 0x2 // Relocations are performed when the object is loaded.
+ RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules.
+ RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules.
+)
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
new file mode 100644
index 0000000000000..6b371620d969d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h
+const (
+ intSize = 32 << (^uint(0) >> 63) // 32 or 64
+ RTLD_DEFAULT = 1< C)
+//
+// string <=> char*
+// bool <=> _Bool
+// uintptr <=> uintptr_t
+// uint <=> uint32_t or uint64_t
+// uint8 <=> uint8_t
+// uint16 <=> uint16_t
+// uint32 <=> uint32_t
+// uint64 <=> uint64_t
+// int <=> int32_t or int64_t
+// int8 <=> int8_t
+// int16 <=> int16_t
+// int32 <=> int32_t
+// int64 <=> int64_t
+// float32 <=> float
+// float64 <=> double
+// struct <=> struct (WIP - darwin only)
+// func <=> C function
+// unsafe.Pointer, *T <=> void*
+// []T => void*
+//
+// There is a special case when the last argument of fptr is a variadic interface (or []interface}
+// it will be expanded into a call to the C function as if it had the arguments in that slice.
+// This means that using arg ...interface{} is like a cast to the function with the arguments inside arg.
+// This is not the same as C variadic.
+//
+// # Memory
+//
+// In general it is not possible for purego to guarantee the lifetimes of objects returned or received from
+// calling functions using RegisterFunc. For arguments to a C function it is important that the C function doesn't
+// hold onto a reference to Go memory. This is the same as the [Cgo rules].
+//
+// However, there are some special cases. When passing a string as an argument if the string does not end in a null
+// terminated byte (\x00) then the string will be copied into memory maintained by purego. The memory is only valid for
+// that specific call. Therefore, if the C code keeps a reference to that string it may become invalid at some
+// undefined time. However, if the string does already contain a null-terminated byte then no copy is done.
+// It is then the responsibility of the caller to ensure the string stays alive as long as it's needed in C memory.
+// This can be done using runtime.KeepAlive or allocating the string in C memory using malloc. When a C function
+// returns a null-terminated pointer to char a Go string can be used. Purego will allocate a new string in Go memory
+// and copy the data over. This string will be garbage collected whenever Go decides it's no longer referenced.
+// This C created string will not be freed by purego. If the pointer to char is not null-terminated or must continue
+// to point to C memory (because it's a buffer for example) then use a pointer to byte and then convert that to a slice
+// using unsafe.Slice. Doing this means that it becomes the responsibility of the caller to care about the lifetime
+// of the pointer
+//
+// # Structs
+//
+// Purego can handle the most common structs that have fields of builtin types like int8, uint16, float32, etc. However,
+// it does not support aligning fields properly. It is therefore the responsibility of the caller to ensure
+// that all padding is added to the Go struct to match the C one. See `BoolStructFn` in struct_test.go for an example.
+//
+// # Example
+//
+// All functions below call this C function:
+//
+// char *foo(char *str);
+//
+// // Let purego convert types
+// var foo func(s string) string
+// goString := foo("copied")
+// // Go will garbage collect this string
+//
+// // Manually, handle allocations
+// var foo2 func(b string) *byte
+// mustFree := foo2("not copied\x00")
+// defer free(mustFree)
+//
+// [Cgo rules]: https://pkg.go.dev/cmd/cgo#hdr-Go_references_to_C
+func RegisterFunc(fptr interface{}, cfn uintptr) {
+ fn := reflect.ValueOf(fptr).Elem()
+ ty := fn.Type()
+ if ty.Kind() != reflect.Func {
+ panic("purego: fptr must be a function pointer")
+ }
+ if ty.NumOut() > 1 {
+ panic("purego: function can only return zero or one values")
+ }
+ if cfn == 0 {
+ panic("purego: cfn is nil")
+ }
+ if ty.NumOut() == 1 && (ty.Out(0).Kind() == reflect.Float32 || ty.Out(0).Kind() == reflect.Float64) &&
+ runtime.GOARCH != "arm64" && runtime.GOARCH != "amd64" {
+ panic("purego: float returns are not supported")
+ }
+ {
+ // this code checks how many registers and stack this function will use
+ // to avoid crashing with too many arguments
+ var ints int
+ var floats int
+ var stack int
+ for i := 0; i < ty.NumIn(); i++ {
+ arg := ty.In(i)
+ switch arg.Kind() {
+ case reflect.Func:
+ // This only does preliminary testing to ensure the CDecl argument
+ // is the first argument. Full testing is done when the callback is actually
+ // created in NewCallback.
+ for j := 0; j < arg.NumIn(); j++ {
+ in := arg.In(j)
+ if !in.AssignableTo(reflect.TypeOf(CDecl{})) {
+ continue
+ }
+ if j != 0 {
+ panic("purego: CDecl must be the first argument")
+ }
+ }
+ case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer,
+ reflect.Slice, reflect.Bool:
+ if ints < numOfIntegerRegisters() {
+ ints++
+ } else {
+ stack++
+ }
+ case reflect.Float32, reflect.Float64:
+ const is32bit = unsafe.Sizeof(uintptr(0)) == 4
+ if is32bit {
+ panic("purego: floats only supported on 64bit platforms")
+ }
+ if floats < numOfFloats {
+ floats++
+ } else {
+ stack++
+ }
+ case reflect.Struct:
+ if runtime.GOOS != "darwin" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "arm64") {
+ panic("purego: struct arguments are only supported on darwin amd64 & arm64")
+ }
+ if arg.Size() == 0 {
+ continue
+ }
+ addInt := func(u uintptr) {
+ ints++
+ }
+ addFloat := func(u uintptr) {
+ floats++
+ }
+ addStack := func(u uintptr) {
+ stack++
+ }
+ _ = addStruct(reflect.New(arg).Elem(), &ints, &floats, &stack, addInt, addFloat, addStack, nil)
+ default:
+ panic("purego: unsupported kind " + arg.Kind().String())
+ }
+ }
+ if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct {
+ if runtime.GOOS != "darwin" {
+ panic("purego: struct return values only supported on darwin arm64 & amd64")
+ }
+ outType := ty.Out(0)
+ checkStructFieldsSupported(outType)
+ if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize {
+ // on amd64 if struct is bigger than 16 bytes allocate the return struct
+ // and pass it in as a hidden first argument.
+ ints++
+ }
+ }
+ sizeOfStack := maxArgs - numOfIntegerRegisters()
+ if stack > sizeOfStack {
+ panic("purego: too many arguments")
+ }
+ }
+ v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) {
+ if len(args) > 0 {
+ if variadic, ok := args[len(args)-1].Interface().([]interface{}); ok {
+ // subtract one from args bc the last argument in args is []interface{}
+ // which we are currently expanding
+ tmp := make([]reflect.Value, len(args)-1+len(variadic))
+ n := copy(tmp, args[:len(args)-1])
+ for i, v := range variadic {
+ tmp[n+i] = reflect.ValueOf(v)
+ }
+ args = tmp
+ }
+ }
+ var sysargs [maxArgs]uintptr
+ stack := sysargs[numOfIntegerRegisters():]
+ var floats [numOfFloats]uintptr
+ var numInts int
+ var numFloats int
+ var numStack int
+ var addStack, addInt, addFloat func(x uintptr)
+ if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" {
+ // Windows arm64 uses the same calling convention as macOS and Linux
+ addStack = func(x uintptr) {
+ stack[numStack] = x
+ numStack++
+ }
+ addInt = func(x uintptr) {
+ if numInts >= numOfIntegerRegisters() {
+ addStack(x)
+ } else {
+ sysargs[numInts] = x
+ numInts++
+ }
+ }
+ addFloat = func(x uintptr) {
+ if numFloats < len(floats) {
+ floats[numFloats] = x
+ numFloats++
+ } else {
+ addStack(x)
+ }
+ }
+ } else {
+ // On Windows amd64 the arguments are passed in the numbered registered.
+ // So the first int is in the first integer register and the first float
+ // is in the second floating register if there is already a first int.
+ // This is in contrast to how macOS and Linux pass arguments which
+ // tries to use as many registers as possible in the calling convention.
+ addStack = func(x uintptr) {
+ sysargs[numStack] = x
+ numStack++
+ }
+ addInt = addStack
+ addFloat = addStack
+ }
+
+ var keepAlive []interface{}
+ defer func() {
+ runtime.KeepAlive(keepAlive)
+ runtime.KeepAlive(args)
+ }()
+ var syscall syscall15Args
+ if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct {
+ outType := ty.Out(0)
+ if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize {
+ val := reflect.New(outType)
+ keepAlive = append(keepAlive, val)
+ addInt(val.Pointer())
+ } else if runtime.GOARCH == "arm64" && outType.Size() > maxRegAllocStructSize {
+ isAllFloats, numFields := isAllSameFloat(outType)
+ if !isAllFloats || numFields > 4 {
+ val := reflect.New(outType)
+ keepAlive = append(keepAlive, val)
+ syscall.arm64_r8 = val.Pointer()
+ }
+ }
+ }
+ for _, v := range args {
+ switch v.Kind() {
+ case reflect.String:
+ ptr := strings.CString(v.String())
+ keepAlive = append(keepAlive, ptr)
+ addInt(uintptr(unsafe.Pointer(ptr)))
+ case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ addInt(uintptr(v.Uint()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ addInt(uintptr(v.Int()))
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Slice:
+ // There is no need to keepAlive this pointer separately because it is kept alive in the args variable
+ addInt(v.Pointer())
+ case reflect.Func:
+ addInt(NewCallback(v.Interface()))
+ case reflect.Bool:
+ if v.Bool() {
+ addInt(1)
+ } else {
+ addInt(0)
+ }
+ case reflect.Float32:
+ addFloat(uintptr(math.Float32bits(float32(v.Float()))))
+ case reflect.Float64:
+ addFloat(uintptr(math.Float64bits(v.Float())))
+ case reflect.Struct:
+ keepAlive = addStruct(v, &numInts, &numFloats, &numStack, addInt, addFloat, addStack, keepAlive)
+ default:
+ panic("purego: unsupported kind: " + v.Kind().String())
+ }
+ }
+ if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" {
+ // Use the normal arm64 calling convention even on Windows
+ syscall = syscall15Args{
+ cfn,
+ sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5],
+ sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11],
+ sysargs[12], sysargs[13], sysargs[14],
+ floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7],
+ syscall.arm64_r8,
+ }
+ runtime_cgocall(syscall15XABI0, unsafe.Pointer(&syscall))
+ } else {
+ // This is a fallback for Windows amd64, 386, and arm. Note this may not support floats
+ syscall.a1, syscall.a2, _ = syscall_syscall15X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4],
+ sysargs[5], sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11],
+ sysargs[12], sysargs[13], sysargs[14])
+ syscall.f1 = syscall.a2 // on amd64 a2 stores the float return. On 32bit platforms floats aren't support
+ }
+ if ty.NumOut() == 0 {
+ return nil
+ }
+ outType := ty.Out(0)
+ v := reflect.New(outType).Elem()
+ switch outType.Kind() {
+ case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v.SetUint(uint64(syscall.a1))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v.SetInt(int64(syscall.a1))
+ case reflect.Bool:
+ v.SetBool(byte(syscall.a1) != 0)
+ case reflect.UnsafePointer:
+ // We take the address and then dereference it to trick go vet from creating a possible miss-use of unsafe.Pointer
+ v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1)))
+ case reflect.Ptr:
+ v = reflect.NewAt(outType, unsafe.Pointer(&syscall.a1)).Elem()
+ case reflect.Func:
+ // wrap this C function in a nicely typed Go function
+ v = reflect.New(outType)
+ RegisterFunc(v.Interface(), syscall.a1)
+ case reflect.String:
+ v.SetString(strings.GoString(syscall.a1))
+ case reflect.Float32:
+ // NOTE: syscall.r2 is only the floating return value on 64bit platforms.
+ // On 32bit platforms syscall.r2 is the upper part of a 64bit return.
+ v.SetFloat(float64(math.Float32frombits(uint32(syscall.f1))))
+ case reflect.Float64:
+ // NOTE: syscall.r2 is only the floating return value on 64bit platforms.
+ // On 32bit platforms syscall.r2 is the upper part of a 64bit return.
+ v.SetFloat(math.Float64frombits(uint64(syscall.f1)))
+ case reflect.Struct:
+ v = getStruct(outType, syscall)
+ default:
+ panic("purego: unsupported return kind: " + outType.Kind().String())
+ }
+ return []reflect.Value{v}
+ })
+ fn.Set(v)
+}
+
+// maxRegAllocStructSize is the biggest a struct can be while still fitting in registers.
+// if it is bigger than this than enough space must be allocated on the heap and then passed into
+// the function as the first parameter on amd64 or in R8 on arm64.
+//
+// If you change this make sure to update it in objc_runtime_darwin.go
+const maxRegAllocStructSize = 16
+
+func isAllSameFloat(ty reflect.Type) (allFloats bool, numFields int) {
+ allFloats = true
+ root := ty.Field(0).Type
+ for root.Kind() == reflect.Struct {
+ root = root.Field(0).Type
+ }
+ first := root.Kind()
+ if first != reflect.Float32 && first != reflect.Float64 {
+ allFloats = false
+ }
+ for i := 0; i < ty.NumField(); i++ {
+ f := ty.Field(i).Type
+ if f.Kind() == reflect.Struct {
+ var structNumFields int
+ allFloats, structNumFields = isAllSameFloat(f)
+ numFields += structNumFields
+ continue
+ }
+ numFields++
+ if f.Kind() != first {
+ allFloats = false
+ }
+ }
+ return allFloats, numFields
+}
+
+func checkStructFieldsSupported(ty reflect.Type) {
+ for i := 0; i < ty.NumField(); i++ {
+ f := ty.Field(i).Type
+ if f.Kind() == reflect.Array {
+ f = f.Elem()
+ } else if f.Kind() == reflect.Struct {
+ checkStructFieldsSupported(f)
+ continue
+ }
+ switch f.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer, reflect.Float64, reflect.Float32:
+ default:
+ panic(fmt.Sprintf("purego: struct field type %s is not supported", f))
+ }
+ }
+}
+
+func roundUpTo8(val uintptr) uintptr {
+ return (val + 7) &^ 7
+}
+
+func numOfIntegerRegisters() int {
+ switch runtime.GOARCH {
+ case "arm64":
+ return 8
+ case "amd64":
+ return 6
+ default:
+ // since this platform isn't supported and can therefore only access
+ // integer registers it is fine to return the maxArgs
+ return maxArgs
+ }
+}
diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go
new file mode 100644
index 0000000000000..13671ff23f270
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/go_runtime.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux || windows
+
+package purego
+
+import (
+ "unsafe"
+)
+
+//go:linkname runtime_cgocall runtime.cgocall
+func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
new file mode 100644
index 0000000000000..b09ecac1cfebb
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+//go:build freebsd || linux
+
+package cgo
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include
+#include
+*/
+import "C"
+
+import (
+ "errors"
+ "unsafe"
+)
+
+func Dlopen(filename string, flag int) (uintptr, error) {
+ cfilename := C.CString(filename)
+ defer C.free(unsafe.Pointer(cfilename))
+ handle := C.dlopen(cfilename, C.int(flag))
+ if handle == nil {
+ return 0, errors.New(C.GoString(C.dlerror()))
+ }
+ return uintptr(handle), nil
+}
+
+func Dlsym(handle uintptr, symbol string) (uintptr, error) {
+ csymbol := C.CString(symbol)
+ defer C.free(unsafe.Pointer(csymbol))
+ symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol)
+ if symbolAddr == nil {
+ return 0, errors.New(C.GoString(C.dlerror()))
+ }
+ return uintptr(symbolAddr), nil
+}
+
+func Dlclose(handle uintptr) error {
+ result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle)))
+ if result != 0 {
+ return errors.New(C.GoString(C.dlerror()))
+ }
+ return nil
+}
+
+// all that is needed is to assign each dl function because then its
+// symbol will then be made available to the linker and linked to inside dlfcn.go
+var (
+ _ = C.dlopen
+ _ = C.dlsym
+ _ = C.dlerror
+ _ = C.dlclose
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/empty.go b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
new file mode 100644
index 0000000000000..1d7cffe2a7e5f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package cgo
+
+// Empty so that importing this package doesn't cause issue for certain platforms.
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
new file mode 100644
index 0000000000000..37ff24d5c1de7
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build freebsd || (linux && !(arm64 || amd64))
+
+package cgo
+
+// this file is placed inside internal/cgo and not package purego
+// because Cgo and assembly files can't be in the same package.
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include
+#include
+#include
+#include
+
+typedef struct syscall15Args {
+ uintptr_t fn;
+ uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15;
+ uintptr_t f1, f2, f3, f4, f5, f6, f7, f8;
+ uintptr_t err;
+} syscall15Args;
+
+void syscall15(struct syscall15Args *args) {
+ assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0);
+ uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6,
+ uintptr_t a7, uintptr_t a8, uintptr_t a9, uintptr_t a10, uintptr_t a11, uintptr_t a12,
+ uintptr_t a13, uintptr_t a14, uintptr_t a15);
+ *(void**)(&func_name) = (void*)(args->fn);
+ uintptr_t r1 = func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9,
+ args->a10,args->a11,args->a12,args->a13,args->a14,args->a15);
+ args->a1 = r1;
+ args->err = errno;
+}
+
+*/
+import "C"
+import "unsafe"
+
+// assign purego.syscall15XABI0 to the C version of this function.
+var Syscall15XABI0 = unsafe.Pointer(C.syscall15)
+
+//go:nosplit
+func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+ args := C.syscall15Args{
+ C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3),
+ C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6),
+ C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12),
+ C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ C.syscall15(&args)
+ return uintptr(args.a1), 0, uintptr(args.err)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
new file mode 100644
index 0000000000000..9949435fe9e0a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These save the frame pointer, so in general, functions that use
+// these should have zero frame size to suppress the automatic frame
+// pointer, though it's harmless to not do this.
+
+#ifdef GOOS_windows
+
+// REGS_HOST_TO_ABI0_STACK is the stack bytes used by
+// PUSH_REGS_HOST_TO_ABI0.
+#define REGS_HOST_TO_ABI0_STACK (28*8 + 8)
+
+// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from
+// the host ABI to Go ABI0 code. It saves all registers that are
+// callee-save in the host ABI and caller-save in Go ABI0 and prepares
+// for entry to Go.
+//
+// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag.
+// Clear the DF flag for the Go ABI.
+// MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+#define PUSH_REGS_HOST_TO_ABI0() \
+ PUSHFQ \
+ CLD \
+ ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \
+ MOVQ DI, (0*0)(SP) \
+ MOVQ SI, (1*8)(SP) \
+ MOVQ BP, (2*8)(SP) \
+ MOVQ BX, (3*8)(SP) \
+ MOVQ R12, (4*8)(SP) \
+ MOVQ R13, (5*8)(SP) \
+ MOVQ R14, (6*8)(SP) \
+ MOVQ R15, (7*8)(SP) \
+ MOVUPS X6, (8*8)(SP) \
+ MOVUPS X7, (10*8)(SP) \
+ MOVUPS X8, (12*8)(SP) \
+ MOVUPS X9, (14*8)(SP) \
+ MOVUPS X10, (16*8)(SP) \
+ MOVUPS X11, (18*8)(SP) \
+ MOVUPS X12, (20*8)(SP) \
+ MOVUPS X13, (22*8)(SP) \
+ MOVUPS X14, (24*8)(SP) \
+ MOVUPS X15, (26*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0() \
+ MOVQ (0*0)(SP), DI \
+ MOVQ (1*8)(SP), SI \
+ MOVQ (2*8)(SP), BP \
+ MOVQ (3*8)(SP), BX \
+ MOVQ (4*8)(SP), R12 \
+ MOVQ (5*8)(SP), R13 \
+ MOVQ (6*8)(SP), R14 \
+ MOVQ (7*8)(SP), R15 \
+ MOVUPS (8*8)(SP), X6 \
+ MOVUPS (10*8)(SP), X7 \
+ MOVUPS (12*8)(SP), X8 \
+ MOVUPS (14*8)(SP), X9 \
+ MOVUPS (16*8)(SP), X10 \
+ MOVUPS (18*8)(SP), X11 \
+ MOVUPS (20*8)(SP), X12 \
+ MOVUPS (22*8)(SP), X13 \
+ MOVUPS (24*8)(SP), X14 \
+ MOVUPS (26*8)(SP), X15 \
+ ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \
+ POPFQ
+
+#else
+// SysV ABI
+
+#define REGS_HOST_TO_ABI0_STACK (6*8)
+
+// SysV MXCSR matches the Go ABI, so we don't have to set that,
+// and Go doesn't modify it, so we don't have to save it.
+// Both SysV and Go require DF to be cleared, so that's already clear.
+// The SysV and Go frame pointer conventions are compatible.
+#define PUSH_REGS_HOST_TO_ABI0() \
+ ADJSP $(REGS_HOST_TO_ABI0_STACK) \
+ MOVQ BP, (5*8)(SP) \
+ LEAQ (5*8)(SP), BP \
+ MOVQ BX, (0*8)(SP) \
+ MOVQ R12, (1*8)(SP) \
+ MOVQ R13, (2*8)(SP) \
+ MOVQ R14, (3*8)(SP) \
+ MOVQ R15, (4*8)(SP)
+
+#define POP_REGS_HOST_TO_ABI0() \
+ MOVQ (0*8)(SP), BX \
+ MOVQ (1*8)(SP), R12 \
+ MOVQ (2*8)(SP), R13 \
+ MOVQ (3*8)(SP), R14 \
+ MOVQ (4*8)(SP), R15 \
+ MOVQ (5*8)(SP), BP \
+ ADJSP $-(REGS_HOST_TO_ABI0_STACK)
+
+#endif
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h
new file mode 100644
index 0000000000000..5d5061ec1dbf8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+//
+// These macros save and restore the callee-saved registers
+// from the stack, but they don't adjust stack pointer, so
+// the user should prepare stack space in advance.
+// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP).
+//
+// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space
+// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP).
+//
+// R29 is not saved because Go will save and restore it.
+
+#define SAVE_R19_TO_R28(offset) \
+ STP (R19, R20), ((offset)+0*8)(RSP) \
+ STP (R21, R22), ((offset)+2*8)(RSP) \
+ STP (R23, R24), ((offset)+4*8)(RSP) \
+ STP (R25, R26), ((offset)+6*8)(RSP) \
+ STP (R27, g), ((offset)+8*8)(RSP)
+#define RESTORE_R19_TO_R28(offset) \
+ LDP ((offset)+0*8)(RSP), (R19, R20) \
+ LDP ((offset)+2*8)(RSP), (R21, R22) \
+ LDP ((offset)+4*8)(RSP), (R23, R24) \
+ LDP ((offset)+6*8)(RSP), (R25, R26) \
+ LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */
+#define SAVE_F8_TO_F15(offset) \
+ FSTPD (F8, F9), ((offset)+0*8)(RSP) \
+ FSTPD (F10, F11), ((offset)+2*8)(RSP) \
+ FSTPD (F12, F13), ((offset)+4*8)(RSP) \
+ FSTPD (F14, F15), ((offset)+6*8)(RSP)
+#define RESTORE_F8_TO_F15(offset) \
+ FLDPD ((offset)+0*8)(RSP), (F8, F9) \
+ FLDPD ((offset)+2*8)(RSP), (F10, F11) \
+ FLDPD ((offset)+4*8)(RSP), (F12, F13) \
+ FLDPD ((offset)+6*8)(RSP), (F14, F15)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s
new file mode 100644
index 0000000000000..2b7eb57f8ae78
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "abi_amd64.h"
+
+// Called by C code generated by cmd/cgo.
+// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr)
+// Saves C callee-saved registers and calls cgocallback with three arguments.
+// fn is the PC of a func(a unsafe.Pointer) function.
+// This signature is known to SWIG, so we can't change it.
+TEXT crosscall2(SB), NOSPLIT, $0-0
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // Make room for arguments to cgocallback.
+ ADJSP $0x18
+
+#ifndef GOOS_windows
+ MOVQ DI, 0x0(SP) // fn
+ MOVQ SI, 0x8(SP) // arg
+
+ // Skip n in DX.
+ MOVQ CX, 0x10(SP) // ctxt
+
+#else
+ MOVQ CX, 0x0(SP) // fn
+ MOVQ DX, 0x8(SP) // arg
+
+ // Skip n in R8.
+ MOVQ R9, 0x10(SP) // ctxt
+
+#endif
+
+ CALL runtime·cgocallback(SB)
+
+ ADJSP $-0x18
+ POP_REGS_HOST_TO_ABI0()
+ RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s
new file mode 100644
index 0000000000000..50e5261d922c5
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s
@@ -0,0 +1,36 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "abi_arm64.h"
+
+// Called by C code generated by cmd/cgo.
+// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr)
+// Saves C callee-saved registers and calls cgocallback with three arguments.
+// fn is the PC of a func(a unsafe.Pointer) function.
+TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0
+/*
+ * We still need to save all callee save register as before, and then
+ * push 3 args for fn (R0, R1, R3), skipping R2.
+ * Also note that at procedure entry in gc world, 8(RSP) will be the
+ * first arg.
+ */
+ SUB $(8*24), RSP
+ STP (R0, R1), (8*1)(RSP)
+ MOVD R3, (8*3)(RSP)
+
+ SAVE_R19_TO_R28(8*4)
+ SAVE_F8_TO_F15(8*14)
+ STP (R29, R30), (8*22)(RSP)
+
+ // Initialize Go ABI environment
+ BL runtime·load_g(SB)
+ BL runtime·cgocallback(SB)
+
+ RESTORE_R19_TO_R28(8*4)
+ RESTORE_F8_TO_F15(8*14)
+ LDP (8*22)(RSP), (R29, R30)
+
+ ADD $(8*24), RSP
+ RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
new file mode 100644
index 0000000000000..f29e690cc15b3
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import (
+ _ "unsafe"
+)
+
+// TODO: decide if we need _runtime_cgo_panic_internal
+
+//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline
+//go:linkname _cgo_init _cgo_init
+var x_cgo_init_trampoline byte
+var _cgo_init = &x_cgo_init_trampoline
+
+// Creates a new system thread without updating any Go state.
+//
+// This method is invoked during shared library loading to create a new OS
+// thread to perform the runtime initialization. This method is similar to
+// _cgo_sys_thread_start except that it doesn't update any Go state.
+
+//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline
+//go:linkname _cgo_thread_start _cgo_thread_start
+var x_cgo_thread_start_trampoline byte
+var _cgo_thread_start = &x_cgo_thread_start_trampoline
+
+// Notifies that the runtime has been initialized.
+//
+// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
+// to ensure that the runtime has been initialized before the CGO call is
+// executed. This is necessary for shared libraries where we kickoff runtime
+// initialization in a separate thread and return without waiting for this
+// thread to complete the init.
+
+//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline
+//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
+var x_cgo_notify_runtime_init_done_trampoline byte
+var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline
+
+// Indicates whether a dummy thread key has been created or not.
+//
+// When calling go exported function from C, we register a destructor
+// callback, for a dummy thread key, by using pthread_key_create.
+
+//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created
+var x_cgo_pthread_key_created uintptr
+var _cgo_pthread_key_created = &x_cgo_pthread_key_created
+
+// Set the x_crosscall2_ptr C function pointer variable point to crosscall2.
+// It's for the runtime package to call at init time.
+func set_crosscall2() {
+ // nothing needs to be done here for fakecgo
+ // because it's possible to just call cgocallback directly
+}
+
+//go:linkname _set_crosscall2 runtime.set_crosscall2
+var _set_crosscall2 = set_crosscall2
+
+// Store the g into the thread-specific value.
+// So that pthread_key_destructor will dropm when the thread is exiting.
+
+//go:linkname x_cgo_bindm_trampoline x_cgo_bindm_trampoline
+//go:linkname _cgo_bindm _cgo_bindm
+var x_cgo_bindm_trampoline byte
+var _cgo_bindm = &x_cgo_bindm_trampoline
+
+// TODO: decide if we need x_cgo_set_context_function
+// TODO: decide if we need _cgo_yield
+
+var (
+ // In Go 1.20 the race detector was rewritten to pure Go
+ // on darwin. This means that when CGO_ENABLED=0 is set
+ // fakecgo is built with race detector code. This is not
+ // good since this code is pretending to be C. The go:norace
+ // pragma is not enough, since it only applies to the native
+ // ABIInternal function. The ABIO wrapper (which is necessary,
+ // since all references to text symbols from assembly will use it)
+ // does not inherit the go:norace pragma, so it will still be
+ // instrumented by the race detector.
+ //
+ // To circumvent this issue, using closure calls in the
+ // assembly, which forces the compiler to use the ABIInternal
+ // native implementation (which has go:norace) instead.
+ threadentry_call = threadentry
+ x_cgo_init_call = x_cgo_init
+ x_cgo_setenv_call = x_cgo_setenv
+ x_cgo_unsetenv_call = x_cgo_unsetenv
+ x_cgo_thread_start_call = x_cgo_thread_start
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
new file mode 100644
index 0000000000000..be82f7dfca90f
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go.
+// This allows code that calls into C to function properly when CGO_ENABLED=0.
+//
+// # Goals
+//
+// fakecgo attempts to replicate the same naming structure as in the runtime.
+// For example, functions that have the prefix "gcc_*" are named "go_*".
+// This makes it easier to port other GOOSs and GOARCHs as well as to keep
+// it in sync with runtime/cgo.
+//
+// # Support
+//
+// Currently, fakecgo only supports macOS on amd64 & arm64. It also cannot
+// be used with -buildmode=c-archive because that requires special initialization
+// that fakecgo does not implement at the moment.
+//
+// # Usage
+//
+// Using fakecgo is easy just import _ "github.com/ebitengine/purego" and then
+// set the environment variable CGO_ENABLED=0.
+// The recommended usage for fakecgo is to prefer using runtime/cgo if possible
+// but if cross-compiling or fast build times are important fakecgo is available.
+// Purego will pick which ever Cgo runtime is available and prefer the one that
+// comes with Go (runtime/cgo).
+package fakecgo
+
+//go:generate go run gen.go
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
new file mode 100644
index 0000000000000..bb73a709e6918
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd && !cgo
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+// Supply environ and __progname, because we don't
+// link against the standard FreeBSD crt0.o and the
+// libc dynamic library needs them.
+
+// Note: when building with cross-compiling or CGO_ENABLED=0, add
+// the following argument to `go` so that these symbols are defined by
+// making fakecgo the Cgo.
+// -gcflags="github.com/ebitengine/purego/internal/fakecgo=-std"
+
+//go:linkname _environ environ
+//go:linkname _progname __progname
+
+//go:cgo_export_dynamic environ
+//go:cgo_export_dynamic __progname
+
+var _environ uintptr
+var _progname uintptr
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go
new file mode 100644
index 0000000000000..39f5ff1f06a0e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go
@@ -0,0 +1,73 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+//go:norace
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ size = pthread_get_stacksize_np(pthread_self())
+ pthread_attr_init(&attr)
+ pthread_attr_setstacksize(&attr, size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+//go:norace
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+//go:nosplit
+//go:norace
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+
+ setg_func = setg
+
+ size = pthread_get_stacksize_np(pthread_self())
+ g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go
new file mode 100644
index 0000000000000..d0868f0f79035
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go
@@ -0,0 +1,88 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+//go:norace
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ size = pthread_get_stacksize_np(pthread_self())
+ pthread_attr_init(&attr)
+ pthread_attr_setstacksize(&attr, size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+//go:norace
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ // TODO: support ios
+ //#if TARGET_OS_IPHONE
+ // darwin_arm_init_thread_exception_port();
+ //#endif
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us
+// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup
+// This function can't be go:systemstack since go is not in a state where the systemcheck would work.
+//
+//go:nosplit
+//go:norace
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+
+ setg_func = setg
+ size = pthread_get_stacksize_np(pthread_self())
+ g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096))
+
+ //TODO: support ios
+ //#if TARGET_OS_IPHONE
+ // darwin_arm_init_mach_exception_handler();
+ // darwin_arm_init_thread_exception_port();
+ // init_working_dir();
+ //#endif
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go
new file mode 100644
index 0000000000000..c9ff7156a8974
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go
@@ -0,0 +1,95 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ pthread_attr_init(&attr)
+ pthread_attr_getstacksize(&attr, &size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+ var attr *pthread_attr_t
+
+ /* The memory sanitizer distributed with versions of clang
+ before 3.8 has a bug: if you call mmap before malloc, mmap
+ may return an address that is later overwritten by the msan
+ library. Avoid this problem by forcing a call to malloc
+ here, before we ever call malloc.
+
+ This is only required for the memory sanitizer, so it's
+ unfortunate that we always run it. It should be possible
+ to remove this when we no longer care about versions of
+ clang before 3.8. The test for this is
+ misc/cgo/testsanitizers.
+
+ GCC works hard to eliminate a seemingly unnecessary call to
+ malloc, so we actually use the memory we allocate. */
+
+ setg_func = setg
+ attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+ if attr == nil {
+ println("fakecgo: malloc failed")
+ abort()
+ }
+ pthread_attr_init(attr)
+ pthread_attr_getstacksize(attr, &size)
+ // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))`
+ // but this should be OK since we are taking the address of the first variable in this function.
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+ pthread_attr_destroy(attr)
+ free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go
new file mode 100644
index 0000000000000..e3a060b93506a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ pthread_attr_init(&attr)
+ pthread_attr_getstacksize(&attr, &size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us
+// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup
+// This function can't be go:systemstack since go is not in a state where the systemcheck would work.
+//
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+ var attr *pthread_attr_t
+
+ /* The memory sanitizer distributed with versions of clang
+ before 3.8 has a bug: if you call mmap before malloc, mmap
+ may return an address that is later overwritten by the msan
+ library. Avoid this problem by forcing a call to malloc
+ here, before we ever call malloc.
+
+ This is only required for the memory sanitizer, so it's
+ unfortunate that we always run it. It should be possible
+ to remove this when we no longer care about versions of
+ clang before 3.8. The test for this is
+ misc/cgo/testsanitizers.
+
+ GCC works hard to eliminate a seemingly unnecessary call to
+ malloc, so we actually use the memory we allocate. */
+
+ setg_func = setg
+ attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+ if attr == nil {
+ println("fakecgo: malloc failed")
+ abort()
+ }
+ pthread_attr_init(attr)
+ pthread_attr_getstacksize(attr, &size)
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+ pthread_attr_destroy(attr)
+ free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go
new file mode 100644
index 0000000000000..e5cb46be4557c
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ pthread_g pthread_key_t
+
+ runtime_init_cond = PTHREAD_COND_INITIALIZER
+ runtime_init_mu = PTHREAD_MUTEX_INITIALIZER
+ runtime_init_done int
+)
+
+//go:nosplit
+func x_cgo_notify_runtime_init_done() {
+ pthread_mutex_lock(&runtime_init_mu)
+ runtime_init_done = 1
+ pthread_cond_broadcast(&runtime_init_cond)
+ pthread_mutex_unlock(&runtime_init_mu)
+}
+
+// Store the g into a thread-specific value associated with the pthread key pthread_g.
+// And pthread_key_destructor will dropm when the thread is exiting.
+func x_cgo_bindm(g unsafe.Pointer) {
+ // We assume this will always succeed, otherwise, there might be extra M leaking,
+ // when a C thread exits after a cgo call.
+ // We only invoke this function once per thread in runtime.needAndBindM,
+ // and the next calls just reuse the bound m.
+ pthread_setspecific(pthread_g, g)
+}
+
+// _cgo_try_pthread_create retries pthread_create if it fails with
+// EAGAIN.
+//
+//go:nosplit
+//go:norace
+func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int {
+ var ts syscall.Timespec
+ // tries needs to be the same type as syscall.Timespec.Nsec
+ // but the fields are int32 on 32bit and int64 on 64bit.
+ // tries is assigned to syscall.Timespec.Nsec in order to match its type.
+ tries := ts.Nsec
+ var err int
+
+ for tries = 0; tries < 20; tries++ {
+ err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg)))
+ if err == 0 {
+ pthread_detach(*thread)
+ return 0
+ }
+ if err != int(syscall.EAGAIN) {
+ return err
+ }
+ ts.Sec = 0
+ ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds.
+ nanosleep(&ts, nil)
+ }
+ return int(syscall.EAGAIN)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go
new file mode 100644
index 0000000000000..c9ff7156a8974
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go
@@ -0,0 +1,95 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ pthread_attr_init(&attr)
+ pthread_attr_getstacksize(&attr, &size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+ var attr *pthread_attr_t
+
+ /* The memory sanitizer distributed with versions of clang
+ before 3.8 has a bug: if you call mmap before malloc, mmap
+ may return an address that is later overwritten by the msan
+ library. Avoid this problem by forcing a call to malloc
+ here, before we ever call malloc.
+
+ This is only required for the memory sanitizer, so it's
+ unfortunate that we always run it. It should be possible
+ to remove this when we no longer care about versions of
+ clang before 3.8. The test for this is
+ misc/cgo/testsanitizers.
+
+ GCC works hard to eliminate a seemingly unnecessary call to
+ malloc, so we actually use the memory we allocate. */
+
+ setg_func = setg
+ attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+ if attr == nil {
+ println("fakecgo: malloc failed")
+ abort()
+ }
+ pthread_attr_init(attr)
+ pthread_attr_getstacksize(attr, &size)
+ // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))`
+ // but this should be OK since we are taking the address of the first variable in this function.
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+ pthread_attr_destroy(attr)
+ free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
new file mode 100644
index 0000000000000..a3b1cca59a086
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+ var attr pthread_attr_t
+ var ign, oset sigset_t
+ var p pthread_t
+ var size size_t
+ var err int
+
+ //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+ sigfillset(&ign)
+ pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+ pthread_attr_init(&attr)
+ pthread_attr_getstacksize(&attr, &size)
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts.g.stackhi = uintptr(size)
+
+ err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+ if err != 0 {
+ print("fakecgo: pthread_create failed: ")
+ println(err)
+ abort()
+ }
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+ ts := *(*ThreadStart)(v)
+ free(v)
+
+ setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+ // faking funcs in go is a bit a... involved - but the following works :)
+ fn := uintptr(unsafe.Pointer(&ts.fn))
+ (*(*func())(unsafe.Pointer(&fn)))()
+
+ return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us
+// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup
+// This function can't be go:systemstack since go is not in a state where the systemcheck would work.
+//
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+ var size size_t
+ var attr *pthread_attr_t
+
+ /* The memory sanitizer distributed with versions of clang
+ before 3.8 has a bug: if you call mmap before malloc, mmap
+ may return an address that is later overwritten by the msan
+ library. Avoid this problem by forcing a call to malloc
+ here, before we ever call malloc.
+
+ This is only required for the memory sanitizer, so it's
+ unfortunate that we always run it. It should be possible
+ to remove this when we no longer care about versions of
+ clang before 3.8. The test for this is
+ misc/cgo/testsanitizers.
+
+ GCC works hard to eliminate a seemingly unnecessary call to
+ malloc, so we actually use the memory we allocate. */
+
+ setg_func = setg
+ attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+ if attr == nil {
+ println("fakecgo: malloc failed")
+ abort()
+ }
+ pthread_attr_init(attr)
+ pthread_attr_getstacksize(attr, &size)
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+ pthread_attr_destroy(attr)
+ free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
new file mode 100644
index 0000000000000..e42d84f0b75ea
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+//go:nosplit
+//go:norace
+func x_cgo_setenv(arg *[2]*byte) {
+ setenv(arg[0], arg[1], 1)
+}
+
+//go:nosplit
+//go:norace
+func x_cgo_unsetenv(arg *[1]*byte) {
+ unsetenv(arg[0])
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
new file mode 100644
index 0000000000000..0ac10d1f1578d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import "unsafe"
+
+// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling)
+
+// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c)
+// This get's called instead of the go code for creating new threads
+// -> pthread_* stuff is used, so threads are setup correctly for C
+// If this is missing, TLS is only setup correctly on thread 1!
+// This function should be go:systemstack instead of go:nosplit (but that requires runtime)
+//
+//go:nosplit
+//go:norace
+func x_cgo_thread_start(arg *ThreadStart) {
+ var ts *ThreadStart
+ // Make our own copy that can persist after we return.
+ // _cgo_tsan_acquire();
+ ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts)))
+ // _cgo_tsan_release();
+ if ts == nil {
+ println("fakecgo: out of memory in thread_start")
+ abort()
+ }
+ // *ts = *arg would cause a writebarrier so copy using slices
+ s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8)
+ s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8)
+ for i := range s2 {
+ s1[i] = s2[i]
+ }
+ _cgo_sys_thread_start(ts) // OS-dependent half
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
new file mode 100644
index 0000000000000..28af41cc64072
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
@@ -0,0 +1,19 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// The runtime package contains an uninitialized definition
+// for runtime·iscgo. Override it to tell the runtime we're here.
+// There are various function pointers that should be set too,
+// but those depend on dynamic linker magic to get initialized
+// correctly, and sometimes they break. This variable is a
+// backup: it depends only on old C style static linking rules.
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname _iscgo runtime.iscgo
+var _iscgo bool = true
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go
new file mode 100644
index 0000000000000..74626c64a0e9a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+type (
+ size_t uintptr
+ sigset_t [128]byte
+ pthread_attr_t [64]byte
+ pthread_t int
+ pthread_key_t uint64
+)
+
+// for pthread_sigmask:
+
+type sighow int32
+
+const (
+ SIG_BLOCK sighow = 0
+ SIG_UNBLOCK sighow = 1
+ SIG_SETMASK sighow = 2
+)
+
+type G struct {
+ stacklo uintptr
+ stackhi uintptr
+}
+
+type ThreadStart struct {
+ g *G
+ tls *uintptr
+ fn uintptr
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go
new file mode 100644
index 0000000000000..af148333f6d9c
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+type (
+ pthread_mutex_t struct {
+ sig int64
+ opaque [56]byte
+ }
+ pthread_cond_t struct {
+ sig int64
+ opaque [40]byte
+ }
+)
+
+var (
+ PTHREAD_COND_INITIALIZER = pthread_cond_t{sig: 0x3CB0B1BB}
+ PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{sig: 0x32AAABA7}
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go
new file mode 100644
index 0000000000000..ca1f722c93989
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+type (
+ pthread_cond_t uintptr
+ pthread_mutex_t uintptr
+)
+
+var (
+ PTHREAD_COND_INITIALIZER = pthread_cond_t(0)
+ PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0)
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go
new file mode 100644
index 0000000000000..c4b6e9ea5a4cf
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+type (
+ pthread_cond_t [48]byte
+ pthread_mutex_t [48]byte
+)
+
+var (
+ PTHREAD_COND_INITIALIZER = pthread_cond_t{}
+ PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{}
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go
new file mode 100644
index 0000000000000..f30af0e151569
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline
+//go:linkname _cgo_setenv runtime._cgo_setenv
+var x_cgo_setenv_trampoline byte
+var _cgo_setenv = &x_cgo_setenv_trampoline
+
+//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline
+//go:linkname _cgo_unsetenv runtime._cgo_unsetenv
+var x_cgo_unsetenv_trampoline byte
+var _cgo_unsetenv = &x_cgo_unsetenv_trampoline
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go
new file mode 100644
index 0000000000000..3d19fd822a73e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go
@@ -0,0 +1,181 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// setg_trampoline calls setg with the G provided
+func setg_trampoline(setg uintptr, G uintptr)
+
+// call5 takes fn the C function and 5 arguments and calls the function with those arguments
+func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr
+
+func malloc(size uintptr) unsafe.Pointer {
+ ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0)
+ // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer
+ return *(*unsafe.Pointer)(unsafe.Pointer(&ret))
+}
+
+func free(ptr unsafe.Pointer) {
+ call5(freeABI0, uintptr(ptr), 0, 0, 0, 0)
+}
+
+func setenv(name *byte, value *byte, overwrite int32) int32 {
+ return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0))
+}
+
+func unsetenv(name *byte) int32 {
+ return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0))
+}
+
+func sigfillset(set *sigset_t) int32 {
+ return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0))
+}
+
+func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 {
+ return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0))
+}
+
+func abort() {
+ call5(abortABI0, 0, 0, 0, 0, 0)
+}
+
+func pthread_attr_init(attr *pthread_attr_t) int32 {
+ return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0))
+}
+
+func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 {
+ return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0))
+}
+
+func pthread_detach(thread pthread_t) int32 {
+ return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0))
+}
+
+func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 {
+ return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0))
+}
+
+func pthread_self() pthread_t {
+ return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0))
+}
+
+func pthread_get_stacksize_np(thread pthread_t) size_t {
+ return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0))
+}
+
+func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 {
+ return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0))
+}
+
+func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 {
+ return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0))
+}
+
+func pthread_attr_destroy(attr *pthread_attr_t) int32 {
+ return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0))
+}
+
+func pthread_mutex_lock(mutex *pthread_mutex_t) int32 {
+ return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0))
+}
+
+func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 {
+ return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0))
+}
+
+func pthread_cond_broadcast(cond *pthread_cond_t) int32 {
+ return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0))
+}
+
+func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 {
+ return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0))
+}
+
+//go:linkname _malloc _malloc
+var _malloc uintptr
+var mallocABI0 = uintptr(unsafe.Pointer(&_malloc))
+
+//go:linkname _free _free
+var _free uintptr
+var freeABI0 = uintptr(unsafe.Pointer(&_free))
+
+//go:linkname _setenv _setenv
+var _setenv uintptr
+var setenvABI0 = uintptr(unsafe.Pointer(&_setenv))
+
+//go:linkname _unsetenv _unsetenv
+var _unsetenv uintptr
+var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv))
+
+//go:linkname _sigfillset _sigfillset
+var _sigfillset uintptr
+var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset))
+
+//go:linkname _nanosleep _nanosleep
+var _nanosleep uintptr
+var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep))
+
+//go:linkname _abort _abort
+var _abort uintptr
+var abortABI0 = uintptr(unsafe.Pointer(&_abort))
+
+//go:linkname _pthread_attr_init _pthread_attr_init
+var _pthread_attr_init uintptr
+var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init))
+
+//go:linkname _pthread_create _pthread_create
+var _pthread_create uintptr
+var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create))
+
+//go:linkname _pthread_detach _pthread_detach
+var _pthread_detach uintptr
+var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach))
+
+//go:linkname _pthread_sigmask _pthread_sigmask
+var _pthread_sigmask uintptr
+var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask))
+
+//go:linkname _pthread_self _pthread_self
+var _pthread_self uintptr
+var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self))
+
+//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np
+var _pthread_get_stacksize_np uintptr
+var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np))
+
+//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize
+var _pthread_attr_getstacksize uintptr
+var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize))
+
+//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize
+var _pthread_attr_setstacksize uintptr
+var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize))
+
+//go:linkname _pthread_attr_destroy _pthread_attr_destroy
+var _pthread_attr_destroy uintptr
+var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy))
+
+//go:linkname _pthread_mutex_lock _pthread_mutex_lock
+var _pthread_mutex_lock uintptr
+var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock))
+
+//go:linkname _pthread_mutex_unlock _pthread_mutex_unlock
+var _pthread_mutex_unlock uintptr
+var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock))
+
+//go:linkname _pthread_cond_broadcast _pthread_cond_broadcast
+var _pthread_cond_broadcast uintptr
+var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast))
+
+//go:linkname _pthread_setspecific _pthread_setspecific
+var _pthread_setspecific uintptr
+var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific))
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go
new file mode 100644
index 0000000000000..54aaa46285c86
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go
@@ -0,0 +1,29 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go
new file mode 100644
index 0000000000000..81538119799f5
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go
@@ -0,0 +1,29 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+//go:cgo_import_dynamic purego_malloc malloc "libc.so.7"
+//go:cgo_import_dynamic purego_free free "libc.so.7"
+//go:cgo_import_dynamic purego_setenv setenv "libc.so.7"
+//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.7"
+//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.7"
+//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.7"
+//go:cgo_import_dynamic purego_abort abort "libc.so.7"
+//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so"
+//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so"
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go
new file mode 100644
index 0000000000000..180057d0156d8
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go
@@ -0,0 +1,29 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package fakecgo
+
+//go:cgo_import_dynamic purego_malloc malloc "libc.so.6"
+//go:cgo_import_dynamic purego_free free "libc.so.6"
+//go:cgo_import_dynamic purego_setenv setenv "libc.so.6"
+//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6"
+//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6"
+//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6"
+//go:cgo_import_dynamic purego_abort abort "libc.so.6"
+//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so.0"
+//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so.0"
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s
new file mode 100644
index 0000000000000..c9a3cc09eb318
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || linux || freebsd)
+
+/*
+trampoline for emulating required C functions for cgo in go (see cgo.go)
+(we convert cdecl calling convention to go and vice-versa)
+
+Since we're called from go and call into C we can cheat a bit with the calling conventions:
+ - in go all the registers are caller saved
+ - in C we have a couple of callee saved registers
+
+=> we can use BX, R12, R13, R14, R15 instead of the stack
+
+C Calling convention cdecl used here (we only need integer args):
+1. arg: DI
+2. arg: SI
+3. arg: DX
+4. arg: CX
+5. arg: R8
+6. arg: R9
+We don't need floats with these functions -> AX=0
+return value will be in AX
+*/
+#include "textflag.h"
+#include "go_asm.h"
+
+// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions.
+
+TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16
+ MOVQ DI, AX
+ MOVQ SI, BX
+ MOVQ ·x_cgo_init_call(SB), DX
+ MOVQ (DX), CX
+ CALL CX
+ RET
+
+TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8
+ MOVQ DI, AX
+ MOVQ ·x_cgo_thread_start_call(SB), DX
+ MOVQ (DX), CX
+ CALL CX
+ RET
+
+TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8
+ MOVQ DI, AX
+ MOVQ ·x_cgo_setenv_call(SB), DX
+ MOVQ (DX), CX
+ CALL CX
+ RET
+
+TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8
+ MOVQ DI, AX
+ MOVQ ·x_cgo_unsetenv_call(SB), DX
+ MOVQ (DX), CX
+ CALL CX
+ RET
+
+TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0
+ CALL ·x_cgo_notify_runtime_init_done(SB)
+ RET
+
+TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0
+ CALL ·x_cgo_bindm(SB)
+ RET
+
+// func setg_trampoline(setg uintptr, g uintptr)
+TEXT ·setg_trampoline(SB), NOSPLIT, $0-16
+ MOVQ G+8(FP), DI
+ MOVQ setg+0(FP), BX
+ XORL AX, AX
+ CALL BX
+ RET
+
+TEXT threadentry_trampoline(SB), NOSPLIT, $16
+ MOVQ DI, AX
+ MOVQ ·threadentry_call(SB), DX
+ MOVQ (DX), CX
+ CALL CX
+ RET
+
+TEXT ·call5(SB), NOSPLIT, $0-56
+ MOVQ fn+0(FP), BX
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), CX
+ MOVQ a5+40(FP), R8
+
+ XORL AX, AX // no floats
+
+ PUSHQ BP // save BP
+ MOVQ SP, BP // save SP inside BP bc BP is callee-saved
+ SUBQ $16, SP // allocate space for alignment
+ ANDQ $-16, SP // align on 16 bytes for SSE
+
+ CALL BX
+
+ MOVQ BP, SP // get SP back
+ POPQ BP // restore BP
+
+ MOVQ AX, ret+48(FP)
+ RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s
new file mode 100644
index 0000000000000..9dbdbc0139db4
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+#include "textflag.h"
+#include "go_asm.h"
+
+// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions.
+
+TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0
+ MOVD R0, 8(RSP)
+ MOVD R1, 16(RSP)
+ MOVD ·x_cgo_init_call(SB), R26
+ MOVD (R26), R2
+ CALL (R2)
+ RET
+
+TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0
+ MOVD R0, 8(RSP)
+ MOVD ·x_cgo_thread_start_call(SB), R26
+ MOVD (R26), R2
+ CALL (R2)
+ RET
+
+TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0
+ MOVD R0, 8(RSP)
+ MOVD ·x_cgo_setenv_call(SB), R26
+ MOVD (R26), R2
+ CALL (R2)
+ RET
+
+TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0
+ MOVD R0, 8(RSP)
+ MOVD ·x_cgo_unsetenv_call(SB), R26
+ MOVD (R26), R2
+ CALL (R2)
+ RET
+
+TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0
+ CALL ·x_cgo_notify_runtime_init_done(SB)
+ RET
+
+TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0
+ CALL ·x_cgo_bindm(SB)
+ RET
+
+// func setg_trampoline(setg uintptr, g uintptr)
+TEXT ·setg_trampoline(SB), NOSPLIT, $0-16
+ MOVD G+8(FP), R0
+ MOVD setg+0(FP), R1
+ CALL R1
+ RET
+
+TEXT threadentry_trampoline(SB), NOSPLIT, $0-0
+ MOVD R0, 8(RSP)
+ MOVD ·threadentry_call(SB), R26
+ MOVD (R26), R2
+ CALL (R2)
+ MOVD $0, R0 // TODO: get the return value from threadentry
+ RET
+
+TEXT ·call5(SB), NOSPLIT, $0-0
+ MOVD fn+0(FP), R6
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD a4+32(FP), R3
+ MOVD a5+40(FP), R4
+ CALL R6
+ MOVD R0, ret+48(FP)
+ RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s
new file mode 100644
index 0000000000000..a65b2012c1b40
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s
@@ -0,0 +1,90 @@
+// Code generated by 'go generate' with gen.go. DO NOT EDIT.
+
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+#include "textflag.h"
+
+// these stubs are here because it is not possible to go:linkname directly the C functions on darwin arm64
+
+TEXT _malloc(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_malloc(SB)
+ RET
+
+TEXT _free(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_free(SB)
+ RET
+
+TEXT _setenv(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_setenv(SB)
+ RET
+
+TEXT _unsetenv(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_unsetenv(SB)
+ RET
+
+TEXT _sigfillset(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_sigfillset(SB)
+ RET
+
+TEXT _nanosleep(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_nanosleep(SB)
+ RET
+
+TEXT _abort(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_abort(SB)
+ RET
+
+TEXT _pthread_attr_init(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_attr_init(SB)
+ RET
+
+TEXT _pthread_create(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_create(SB)
+ RET
+
+TEXT _pthread_detach(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_detach(SB)
+ RET
+
+TEXT _pthread_sigmask(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_sigmask(SB)
+ RET
+
+TEXT _pthread_self(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_self(SB)
+ RET
+
+TEXT _pthread_get_stacksize_np(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_get_stacksize_np(SB)
+ RET
+
+TEXT _pthread_attr_getstacksize(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_attr_getstacksize(SB)
+ RET
+
+TEXT _pthread_attr_setstacksize(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_attr_setstacksize(SB)
+ RET
+
+TEXT _pthread_attr_destroy(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_attr_destroy(SB)
+ RET
+
+TEXT _pthread_mutex_lock(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_mutex_lock(SB)
+ RET
+
+TEXT _pthread_mutex_unlock(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_mutex_unlock(SB)
+ RET
+
+TEXT _pthread_cond_broadcast(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_cond_broadcast(SB)
+ RET
+
+TEXT _pthread_setspecific(SB), NOSPLIT|NOFRAME, $0-0
+ JMP purego_pthread_setspecific(SB)
+ RET
diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go
new file mode 100644
index 0000000000000..5b0d252255477
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package strings
+
+import (
+ "unsafe"
+)
+
+// hasSuffix tests whether the string s ends with suffix.
+func hasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
+// CString converts a go string to *byte that can be passed to C code.
+func CString(name string) *byte {
+ if hasSuffix(name, "\x00") {
+ return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
+ }
+ b := make([]byte, len(name)+1)
+ copy(b, name)
+ return &b[0]
+}
+
+// GoString copies a null-terminated char* to a Go string.
+func GoString(c uintptr) string {
+ // We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer
+ ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c))
+ if ptr == nil {
+ return ""
+ }
+ var length int
+ for {
+ if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' {
+ break
+ }
+ length++
+ }
+ return string(unsafe.Slice((*byte)(ptr), length))
+}
diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go
new file mode 100644
index 0000000000000..ed31da9782401
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/is_ios.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo
+
+package purego
+
+// if you are getting this error it means that you have
+// CGO_ENABLED=0 while trying to build for ios.
+// purego does not support this mode yet.
+// the fix is to set CGO_ENABLED=1 which will require
+// a C compiler.
+var _ = _PUREGO_REQUIRES_CGO_ON_IOS
diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go
new file mode 100644
index 0000000000000..5b989ea814e70
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/nocgo.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package purego
+
+// if CGO_ENABLED=0 import fakecgo to setup the Cgo runtime correctly.
+// This is required since some frameworks need TLS setup the C way which Go doesn't do.
+// We currently don't support ios in fakecgo mode so force Cgo or fail
+//
+// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found
+// in runtime with non-null GCC compiled functions. The variables that are replaced are
+// var (
+// iscgo bool // in runtime/cgo.go
+// _cgo_init unsafe.Pointer // in runtime/cgo.go
+// _cgo_thread_start unsafe.Pointer // in runtime/cgo.go
+// _cgo_notify_runtime_init_done unsafe.Pointer // in runtime/cgo.go
+// _cgo_setenv unsafe.Pointer // in runtime/env_posix.go
+// _cgo_unsetenv unsafe.Pointer // in runtime/env_posix.go
+// )
+// importing fakecgo will set these (using //go:linkname) with functions written
+// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI).
+// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1.
+import _ "github.com/ebitengine/purego/internal/fakecgo"
diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go
new file mode 100644
index 0000000000000..06a82dd8c5b4b
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/struct_amd64.go
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package purego
+
+import (
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) {
+ outSize := outType.Size()
+ switch {
+ case outSize == 0:
+ return reflect.New(outType).Elem()
+ case outSize <= 8:
+ if isAllFloats(outType) {
+ // 2 float32s or 1 float64s are return in the float register
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.f1})).Elem()
+ }
+ // up to 8 bytes is returned in RAX
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.a1})).Elem()
+ case outSize <= 16:
+ r1, r2 := syscall.a1, syscall.a2
+ if isAllFloats(outType) {
+ r1 = syscall.f1
+ r2 = syscall.f2
+ } else {
+ // check first 8 bytes if it's floats
+ hasFirstFloat := false
+ f1 := outType.Field(0).Type
+ if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && outType.Field(1).Type.Kind() == reflect.Float32 {
+ r1 = syscall.f1
+ hasFirstFloat = true
+ }
+
+ // find index of the field that starts the second 8 bytes
+ var i int
+ for i = 0; i < outType.NumField(); i++ {
+ if outType.Field(i).Offset == 8 {
+ break
+ }
+ }
+
+ // check last 8 bytes if they are floats
+ f1 = outType.Field(i).Type
+ if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && i+1 == outType.NumField() {
+ r2 = syscall.f1
+ } else if hasFirstFloat {
+ // if the first field was a float then that means the second integer field
+ // comes from the first integer register
+ r2 = syscall.a1
+ }
+ }
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem()
+ default:
+ // create struct from the Go pointer created above
+ // weird pointer dereference to circumvent go vet
+ return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem()
+ }
+}
+
+func isAllFloats(ty reflect.Type) bool {
+ for i := 0; i < ty.NumField(); i++ {
+ f := ty.Field(i)
+ switch f.Type.Kind() {
+ case reflect.Float64, reflect.Float32:
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf
+// https://gitlab.com/x86-psABIs/x86-64-ABI
+// Class determines where the 8 byte value goes.
+// Higher value classes win over lower value classes
+const (
+ _NO_CLASS = 0b0000
+ _SSE = 0b0001
+ _X87 = 0b0011 // long double not used in Go
+ _INTEGER = 0b0111
+ _MEMORY = 0b1111
+)
+
+func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} {
+ if v.Type().Size() == 0 {
+ return keepAlive
+ }
+
+ // if greater than 64 bytes place on stack
+ if v.Type().Size() > 8*8 {
+ placeStack(v, addStack)
+ return keepAlive
+ }
+ var (
+ savedNumFloats = *numFloats
+ savedNumInts = *numInts
+ savedNumStack = *numStack
+ )
+ placeOnStack := postMerger(v.Type()) || !tryPlaceRegister(v, addFloat, addInt)
+ if placeOnStack {
+ // reset any values placed in registers
+ *numFloats = savedNumFloats
+ *numInts = savedNumInts
+ *numStack = savedNumStack
+ placeStack(v, addStack)
+ }
+ return keepAlive
+}
+
+func postMerger(t reflect.Type) bool {
+ // (c) If the size of the aggregate exceeds two eightbytes and the first eight- byte isn’t SSE or any other
+ // eightbyte isn’t SSEUP, the whole argument is passed in memory.
+ if t.Kind() != reflect.Struct {
+ return false
+ }
+ if t.Size() <= 2*8 {
+ return false
+ }
+ first := getFirst(t).Kind()
+ if first != reflect.Float32 && first != reflect.Float64 {
+ return false
+ }
+ return true
+}
+
+func getFirst(t reflect.Type) reflect.Type {
+ first := t.Field(0).Type
+ if first.Kind() == reflect.Struct {
+ return getFirst(first)
+ }
+ return first
+}
+
+func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) {
+ ok = true
+ var val uint64
+ var shift byte // # of bits to shift
+ var flushed bool
+ class := _NO_CLASS
+ flushIfNeeded := func() {
+ if flushed {
+ return
+ }
+ flushed = true
+ if class == _SSE {
+ addFloat(uintptr(val))
+ } else {
+ addInt(uintptr(val))
+ }
+ val = 0
+ shift = 0
+ class = _NO_CLASS
+ }
+ var place func(v reflect.Value)
+ place = func(v reflect.Value) {
+ var numFields int
+ if v.Kind() == reflect.Struct {
+ numFields = v.Type().NumField()
+ } else {
+ numFields = v.Type().Len()
+ }
+
+ for i := 0; i < numFields; i++ {
+ flushed = false
+ var f reflect.Value
+ if v.Kind() == reflect.Struct {
+ f = v.Field(i)
+ } else {
+ f = v.Index(i)
+ }
+ switch f.Kind() {
+ case reflect.Struct:
+ place(f)
+ case reflect.Bool:
+ if f.Bool() {
+ val |= 1
+ }
+ shift += 8
+ class |= _INTEGER
+ case reflect.Pointer:
+ ok = false
+ return
+ case reflect.Int8:
+ val |= uint64(f.Int()&0xFF) << shift
+ shift += 8
+ class |= _INTEGER
+ case reflect.Int16:
+ val |= uint64(f.Int()&0xFFFF) << shift
+ shift += 16
+ class |= _INTEGER
+ case reflect.Int32:
+ val |= uint64(f.Int()&0xFFFF_FFFF) << shift
+ shift += 32
+ class |= _INTEGER
+ case reflect.Int64:
+ val = uint64(f.Int())
+ shift = 64
+ class = _INTEGER
+ case reflect.Uint8:
+ val |= f.Uint() << shift
+ shift += 8
+ class |= _INTEGER
+ case reflect.Uint16:
+ val |= f.Uint() << shift
+ shift += 16
+ class |= _INTEGER
+ case reflect.Uint32:
+ val |= f.Uint() << shift
+ shift += 32
+ class |= _INTEGER
+ case reflect.Uint64:
+ val = f.Uint()
+ shift = 64
+ class = _INTEGER
+ case reflect.Float32:
+ val |= uint64(math.Float32bits(float32(f.Float()))) << shift
+ shift += 32
+ class |= _SSE
+ case reflect.Float64:
+ if v.Type().Size() > 16 {
+ ok = false
+ return
+ }
+ val = uint64(math.Float64bits(f.Float()))
+ shift = 64
+ class = _SSE
+ case reflect.Array:
+ place(f)
+ default:
+ panic("purego: unsupported kind " + f.Kind().String())
+ }
+
+ if shift == 64 {
+ flushIfNeeded()
+ } else if shift > 64 {
+ // Should never happen, but may if we forget to reset shift after flush (or forget to flush),
+ // better fall apart here, than corrupt arguments.
+ panic("purego: tryPlaceRegisters shift > 64")
+ }
+ }
+ }
+
+ place(v)
+ flushIfNeeded()
+ return ok
+}
+
+func placeStack(v reflect.Value, addStack func(uintptr)) {
+ for i := 0; i < v.Type().NumField(); i++ {
+ f := v.Field(i)
+ switch f.Kind() {
+ case reflect.Pointer:
+ addStack(f.Pointer())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ addStack(uintptr(f.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ addStack(uintptr(f.Uint()))
+ case reflect.Float32:
+ addStack(uintptr(math.Float32bits(float32(f.Float()))))
+ case reflect.Float64:
+ addStack(uintptr(math.Float64bits(f.Float())))
+ case reflect.Struct:
+ placeStack(f, addStack)
+ default:
+ panic("purego: unsupported kind " + f.Kind().String())
+ }
+ }
+}
diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go
new file mode 100644
index 0000000000000..11c36bd6e47b9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/struct_arm64.go
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package purego
+
+import (
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) {
+ outSize := outType.Size()
+ switch {
+ case outSize == 0:
+ return reflect.New(outType).Elem()
+ case outSize <= 8:
+ r1 := syscall.a1
+ if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats {
+ r1 = syscall.f1
+ if numFields == 2 {
+ r1 = syscall.f2<<32 | syscall.f1
+ }
+ }
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem()
+ case outSize <= 16:
+ r1, r2 := syscall.a1, syscall.a2
+ if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats {
+ switch numFields {
+ case 4:
+ r1 = syscall.f2<<32 | syscall.f1
+ r2 = syscall.f4<<32 | syscall.f3
+ case 3:
+ r1 = syscall.f2<<32 | syscall.f1
+ r2 = syscall.f3
+ case 2:
+ r1 = syscall.f1
+ r2 = syscall.f2
+ default:
+ panic("unreachable")
+ }
+ }
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem()
+ default:
+ if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats && numFields <= 4 {
+ switch numFields {
+ case 4:
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c, d uintptr }{syscall.f1, syscall.f2, syscall.f3, syscall.f4})).Elem()
+ case 3:
+ return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c uintptr }{syscall.f1, syscall.f2, syscall.f3})).Elem()
+ default:
+ panic("unreachable")
+ }
+ }
+ // create struct from the Go pointer created in arm64_r8
+ // weird pointer dereference to circumvent go vet
+ return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.arm64_r8))).Elem()
+ }
+}
+
+// https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst
+const (
+ _NO_CLASS = 0b00
+ _FLOAT = 0b01
+ _INT = 0b11
+)
+
+func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} {
+ if v.Type().Size() == 0 {
+ return keepAlive
+ }
+
+ if hva, hfa, size := isHVA(v.Type()), isHFA(v.Type()), v.Type().Size(); hva || hfa || size <= 16 {
+ // if this doesn't fit entirely in registers then
+ // each element goes onto the stack
+ if hfa && *numFloats+v.NumField() > numOfFloats {
+ *numFloats = numOfFloats
+ } else if hva && *numInts+v.NumField() > numOfIntegerRegisters() {
+ *numInts = numOfIntegerRegisters()
+ }
+
+ placeRegisters(v, addFloat, addInt)
+ } else {
+ keepAlive = placeStack(v, keepAlive, addInt)
+ }
+ return keepAlive // the struct was allocated so don't panic
+}
+
+func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) {
+ var val uint64
+ var shift byte
+ var flushed bool
+ class := _NO_CLASS
+ var place func(v reflect.Value)
+ place = func(v reflect.Value) {
+ var numFields int
+ if v.Kind() == reflect.Struct {
+ numFields = v.Type().NumField()
+ } else {
+ numFields = v.Type().Len()
+ }
+ for k := 0; k < numFields; k++ {
+ flushed = false
+ var f reflect.Value
+ if v.Kind() == reflect.Struct {
+ f = v.Field(k)
+ } else {
+ f = v.Index(k)
+ }
+ if shift >= 64 {
+ shift = 0
+ flushed = true
+ if class == _FLOAT {
+ addFloat(uintptr(val))
+ } else {
+ addInt(uintptr(val))
+ }
+ }
+ switch f.Type().Kind() {
+ case reflect.Struct:
+ place(f)
+ case reflect.Bool:
+ if f.Bool() {
+ val |= 1
+ }
+ shift += 8
+ class |= _INT
+ case reflect.Uint8:
+ val |= f.Uint() << shift
+ shift += 8
+ class |= _INT
+ case reflect.Uint16:
+ val |= f.Uint() << shift
+ shift += 16
+ class |= _INT
+ case reflect.Uint32:
+ val |= f.Uint() << shift
+ shift += 32
+ class |= _INT
+ case reflect.Uint64:
+ addInt(uintptr(f.Uint()))
+ shift = 0
+ flushed = true
+ case reflect.Int8:
+ val |= uint64(f.Int()&0xFF) << shift
+ shift += 8
+ class |= _INT
+ case reflect.Int16:
+ val |= uint64(f.Int()&0xFFFF) << shift
+ shift += 16
+ class |= _INT
+ case reflect.Int32:
+ val |= uint64(f.Int()&0xFFFF_FFFF) << shift
+ shift += 32
+ class |= _INT
+ case reflect.Int64:
+ addInt(uintptr(f.Int()))
+ shift = 0
+ flushed = true
+ case reflect.Float32:
+ if class == _FLOAT {
+ addFloat(uintptr(val))
+ val = 0
+ shift = 0
+ }
+ val |= uint64(math.Float32bits(float32(f.Float()))) << shift
+ shift += 32
+ class |= _FLOAT
+ case reflect.Float64:
+ addFloat(uintptr(math.Float64bits(float64(f.Float()))))
+ shift = 0
+ flushed = true
+ case reflect.Array:
+ place(f)
+ default:
+ panic("purego: unsupported kind " + f.Kind().String())
+ }
+ }
+ }
+ place(v)
+ if !flushed {
+ if class == _FLOAT {
+ addFloat(uintptr(val))
+ } else {
+ addInt(uintptr(val))
+ }
+ }
+}
+
+func placeStack(v reflect.Value, keepAlive []interface{}, addInt func(uintptr)) []interface{} {
+ // Struct is too big to be placed in registers.
+ // Copy to heap and place the pointer in register
+ ptrStruct := reflect.New(v.Type())
+ ptrStruct.Elem().Set(v)
+ ptr := ptrStruct.Elem().Addr().UnsafePointer()
+ keepAlive = append(keepAlive, ptr)
+ addInt(uintptr(ptr))
+ return keepAlive
+}
+
+// isHFA reports a Homogeneous Floating-point Aggregate (HFA) which is a Fundamental Data Type that is a
+// Floating-Point type and at most four uniquely addressable members (5.9.5.1 in [Arm64 Calling Convention]).
+// This type of struct will be placed more compactly than the individual fields.
+//
+// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst
+func isHFA(t reflect.Type) bool {
+ // round up struct size to nearest 8 see section B.4
+ structSize := roundUpTo8(t.Size())
+ if structSize == 0 || t.NumField() > 4 {
+ return false
+ }
+ first := t.Field(0)
+ switch first.Type.Kind() {
+ case reflect.Float32, reflect.Float64:
+ firstKind := first.Type.Kind()
+ for i := 0; i < t.NumField(); i++ {
+ if t.Field(i).Type.Kind() != firstKind {
+ return false
+ }
+ }
+ return true
+ case reflect.Array:
+ switch first.Type.Elem().Kind() {
+ case reflect.Float32, reflect.Float64:
+ return true
+ default:
+ return false
+ }
+ case reflect.Struct:
+ for i := 0; i < first.Type.NumField(); i++ {
+ if !isHFA(first.Type) {
+ return false
+ }
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// isHVA reports a Homogeneous Aggregate with a Fundamental Data Type that is a Short-Vector type
+// and at most four uniquely addressable members (5.9.5.2 in [Arm64 Calling Convention]).
+// A short vector is a machine type that is composed of repeated instances of one fundamental integral or
+// floating-point type. It may be 8 or 16 bytes in total size (5.4 in [Arm64 Calling Convention]).
+// This type of struct will be placed more compactly than the individual fields.
+//
+// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst
+func isHVA(t reflect.Type) bool {
+ // round up struct size to nearest 8 see section B.4
+ structSize := roundUpTo8(t.Size())
+ if structSize == 0 || (structSize != 8 && structSize != 16) {
+ return false
+ }
+ first := t.Field(0)
+ switch first.Type.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32:
+ firstKind := first.Type.Kind()
+ for i := 0; i < t.NumField(); i++ {
+ if t.Field(i).Type.Kind() != firstKind {
+ return false
+ }
+ }
+ return true
+ case reflect.Array:
+ switch first.Type.Elem().Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32:
+ return true
+ default:
+ return false
+ }
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/ebitengine/purego/struct_other.go b/vendor/github.com/ebitengine/purego/struct_other.go
new file mode 100644
index 0000000000000..9d42adac898e5
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/struct_other.go
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+//go:build !amd64 && !arm64
+
+package purego
+
+import "reflect"
+
+func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} {
+ panic("purego: struct arguments are not supported")
+}
+
+func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) {
+ panic("purego: struct returns are not supported")
+}
diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s
new file mode 100644
index 0000000000000..cabde1a584e98
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_amd64.s
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux
+
+#include "textflag.h"
+#include "abi_amd64.h"
+#include "go_asm.h"
+#include "funcdata.h"
+
+#define STACK_SIZE 80
+#define PTR_ADDRESS (STACK_SIZE - 8)
+
+// syscall15X calls a function in libc on behalf of the syscall package.
+// syscall15X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// a11 uintptr
+// a12 uintptr
+// a13 uintptr
+// a14 uintptr
+// a15 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall15X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT|NOFRAME, $0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $STACK_SIZE, SP
+ MOVQ DI, PTR_ADDRESS(BP) // save the pointer
+ MOVQ DI, R11
+
+ MOVQ syscall15Args_f1(R11), X0 // f1
+ MOVQ syscall15Args_f2(R11), X1 // f2
+ MOVQ syscall15Args_f3(R11), X2 // f3
+ MOVQ syscall15Args_f4(R11), X3 // f4
+ MOVQ syscall15Args_f5(R11), X4 // f5
+ MOVQ syscall15Args_f6(R11), X5 // f6
+ MOVQ syscall15Args_f7(R11), X6 // f7
+ MOVQ syscall15Args_f8(R11), X7 // f8
+
+ MOVQ syscall15Args_a1(R11), DI // a1
+ MOVQ syscall15Args_a2(R11), SI // a2
+ MOVQ syscall15Args_a3(R11), DX // a3
+ MOVQ syscall15Args_a4(R11), CX // a4
+ MOVQ syscall15Args_a5(R11), R8 // a5
+ MOVQ syscall15Args_a6(R11), R9 // a6
+
+	// push the remaining parameters onto the stack
+ MOVQ syscall15Args_a7(R11), R12
+ MOVQ R12, 0(SP) // push a7
+ MOVQ syscall15Args_a8(R11), R12
+ MOVQ R12, 8(SP) // push a8
+ MOVQ syscall15Args_a9(R11), R12
+ MOVQ R12, 16(SP) // push a9
+ MOVQ syscall15Args_a10(R11), R12
+ MOVQ R12, 24(SP) // push a10
+ MOVQ syscall15Args_a11(R11), R12
+ MOVQ R12, 32(SP) // push a11
+ MOVQ syscall15Args_a12(R11), R12
+ MOVQ R12, 40(SP) // push a12
+ MOVQ syscall15Args_a13(R11), R12
+ MOVQ R12, 48(SP) // push a13
+ MOVQ syscall15Args_a14(R11), R12
+ MOVQ R12, 56(SP) // push a14
+ MOVQ syscall15Args_a15(R11), R12
+ MOVQ R12, 64(SP) // push a15
+ XORL AX, AX // vararg: say "no float args"
+
+ MOVQ syscall15Args_fn(R11), R10 // fn
+ CALL R10
+
+ MOVQ PTR_ADDRESS(BP), DI // get the pointer back
+ MOVQ AX, syscall15Args_a1(DI) // r1
+ MOVQ DX, syscall15Args_a2(DI) // r3
+ MOVQ X0, syscall15Args_f1(DI) // f1
+ MOVQ X1, syscall15Args_f2(DI) // f2
+
+ XORL AX, AX // no error (it's ignored anyway)
+ ADDQ $STACK_SIZE, SP
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+ MOVQ 0(SP), AX // save the return address to calculate the cb index
+ MOVQ 8(SP), R10 // get the return SP so that we can align register args with stack args
+ ADDQ $8, SP // remove return address from stack, we are not returning to callbackasm, but to its caller.
+
+ // make space for first six int and 8 float arguments below the frame
+ ADJSP $14*8, SP
+ MOVSD X0, (1*8)(SP)
+ MOVSD X1, (2*8)(SP)
+ MOVSD X2, (3*8)(SP)
+ MOVSD X3, (4*8)(SP)
+ MOVSD X4, (5*8)(SP)
+ MOVSD X5, (6*8)(SP)
+ MOVSD X6, (7*8)(SP)
+ MOVSD X7, (8*8)(SP)
+ MOVQ DI, (9*8)(SP)
+ MOVQ SI, (10*8)(SP)
+ MOVQ DX, (11*8)(SP)
+ MOVQ CX, (12*8)(SP)
+ MOVQ R8, (13*8)(SP)
+ MOVQ R9, (14*8)(SP)
+ LEAQ 8(SP), R8 // R8 = address of args vector
+
+ PUSHQ R10 // push the stack pointer below registers
+
+ // Switch from the host ABI to the Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // determine index into runtime·cbs table
+ MOVQ $callbackasm(SB), DX
+ SUBQ DX, AX
+ MOVQ $0, DX
+ MOVQ $5, CX // divide by 5 because each call instruction in ·callbacks is 5 bytes long
+ DIVL CX
+ SUBQ $1, AX // subtract 1 because return PC is to the next slot
+
+ // Create a struct callbackArgs on our stack to be passed as
+ // the "frame" to cgocallback and on to callbackWrap.
+ // $24 to make enough room for the arguments to runtime.cgocallback
+ SUBQ $(24+callbackArgs__size), SP
+ MOVQ AX, (24+callbackArgs_index)(SP) // callback index
+ MOVQ R8, (24+callbackArgs_args)(SP) // address of args vector
+ MOVQ $0, (24+callbackArgs_result)(SP) // result
+ LEAQ 24(SP), AX // take the address of callbackArgs
+
+ // Call cgocallback, which will call callbackWrap(frame).
+ MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer
+ MOVQ (DI), DI // without by using a closure.
+ MOVQ AX, SI // frame (address of callbackArgs)
+ MOVQ $0, CX // context
+
+ CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr)
+
+ // Get callback result.
+ MOVQ (24+callbackArgs_result)(SP), AX
+ ADDQ $(24+callbackArgs__size), SP // remove callbackArgs struct
+
+ POP_REGS_HOST_TO_ABI0()
+
+ POPQ R10 // get the SP back
+ ADJSP $-14*8, SP // remove arguments
+
+ MOVQ R10, 0(SP)
+
+ RET
diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s
new file mode 100644
index 0000000000000..a68fdb99ba7aa
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_arm64.s
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux || windows
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+
+#define STACK_SIZE 64
+#define PTR_ADDRESS (STACK_SIZE - 8)
+
+// syscall15X calls a function in libc on behalf of the syscall package.
+// syscall15X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// a11 uintptr
+// a12 uintptr
+// a13 uintptr
+// a14 uintptr
+// a15 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall15X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT, $0
+ SUB $STACK_SIZE, RSP // push structure pointer
+ MOVD R0, PTR_ADDRESS(RSP)
+ MOVD R0, R9
+
+ FMOVD syscall15Args_f1(R9), F0 // f1
+ FMOVD syscall15Args_f2(R9), F1 // f2
+ FMOVD syscall15Args_f3(R9), F2 // f3
+ FMOVD syscall15Args_f4(R9), F3 // f4
+ FMOVD syscall15Args_f5(R9), F4 // f5
+ FMOVD syscall15Args_f6(R9), F5 // f6
+ FMOVD syscall15Args_f7(R9), F6 // f7
+ FMOVD syscall15Args_f8(R9), F7 // f8
+
+ MOVD syscall15Args_a1(R9), R0 // a1
+ MOVD syscall15Args_a2(R9), R1 // a2
+ MOVD syscall15Args_a3(R9), R2 // a3
+ MOVD syscall15Args_a4(R9), R3 // a4
+ MOVD syscall15Args_a5(R9), R4 // a5
+ MOVD syscall15Args_a6(R9), R5 // a6
+ MOVD syscall15Args_a7(R9), R6 // a7
+ MOVD syscall15Args_a8(R9), R7 // a8
+ MOVD syscall15Args_arm64_r8(R9), R8 // r8
+
+ MOVD syscall15Args_a9(R9), R10
+ MOVD R10, 0(RSP) // push a9 onto stack
+ MOVD syscall15Args_a10(R9), R10
+ MOVD R10, 8(RSP) // push a10 onto stack
+ MOVD syscall15Args_a11(R9), R10
+ MOVD R10, 16(RSP) // push a11 onto stack
+ MOVD syscall15Args_a12(R9), R10
+ MOVD R10, 24(RSP) // push a12 onto stack
+ MOVD syscall15Args_a13(R9), R10
+ MOVD R10, 32(RSP) // push a13 onto stack
+ MOVD syscall15Args_a14(R9), R10
+ MOVD R10, 40(RSP) // push a14 onto stack
+ MOVD syscall15Args_a15(R9), R10
+ MOVD R10, 48(RSP) // push a15 onto stack
+
+ MOVD syscall15Args_fn(R9), R10 // fn
+ BL (R10)
+
+ MOVD PTR_ADDRESS(RSP), R2 // pop structure pointer
+ ADD $STACK_SIZE, RSP
+
+ MOVD R0, syscall15Args_a1(R2) // save r1
+ MOVD R1, syscall15Args_a2(R2) // save r3
+ FMOVD F0, syscall15Args_f1(R2) // save f0
+ FMOVD F1, syscall15Args_f2(R2) // save f1
+ FMOVD F2, syscall15Args_f3(R2) // save f2
+ FMOVD F3, syscall15Args_f4(R2) // save f3
+
+ RET
diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
new file mode 100644
index 0000000000000..6da06b4d18826
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+#include "abi_arm64.h"
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+ NO_LOCAL_POINTERS
+
+ // On entry, the trampoline in zcallback_darwin_arm64.s left
+ // the callback index in R12 (which is volatile in the C ABI).
+
+ // Save callback register arguments R0-R7 and F0-F7.
+ // We do this at the top of the frame so they're contiguous with stack arguments.
+ SUB $(16*8), RSP, R14
+ FSTPD (F0, F1), (0*8)(R14)
+ FSTPD (F2, F3), (2*8)(R14)
+ FSTPD (F4, F5), (4*8)(R14)
+ FSTPD (F6, F7), (6*8)(R14)
+ STP (R0, R1), (8*8)(R14)
+ STP (R2, R3), (10*8)(R14)
+ STP (R4, R5), (12*8)(R14)
+ STP (R6, R7), (14*8)(R14)
+
+ // Adjust SP by frame size.
+ SUB $(26*8), RSP
+
+ // It is important to save R27 because the go assembler
+ // uses it for move instructions for a variable.
+ // This line:
+ // MOVD ·callbackWrap_call(SB), R0
+ // Creates the instructions:
+ // ADRP 14335(PC), R27
+ // MOVD 388(27), R0
+ // R27 is a callee saved register so we are responsible
+ // for ensuring its value doesn't change. So save it and
+ // restore it at the end of this function.
+ // R30 is the link register. crosscall2 doesn't save it
+ // so it's saved here.
+ STP (R27, R30), 0(RSP)
+
+ // Create a struct callbackArgs on our stack.
+ MOVD $(callbackArgs__size)(RSP), R13
+ MOVD R12, callbackArgs_index(R13) // callback index
+ MOVD R14, callbackArgs_args(R13) // address of args vector
+ MOVD ZR, callbackArgs_result(R13) // result
+
+ // Move parameters into registers
+ // Get the ABIInternal function pointer
+ // without by using a closure.
+ MOVD ·callbackWrap_call(SB), R0
+ MOVD (R0), R0 // fn unsafe.Pointer
+ MOVD R13, R1 // frame (&callbackArgs{...})
+ MOVD $0, R3 // ctxt uintptr
+
+ BL crosscall2(SB)
+
+ // Get callback result.
+ MOVD $(callbackArgs__size)(RSP), R13
+ MOVD callbackArgs_result(R13), R0
+
+ // Restore LR and R27
+ LDP 0(RSP), (R27, R30)
+ ADD $(26*8), RSP
+
+ RET
diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go
new file mode 100644
index 0000000000000..c30688dda130e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux || windows
+
+package purego
+
+// CDecl marks a function as being called using the __cdecl calling convention as defined in
+// the [MSDocs] when passed to NewCallback. It must be the first argument to the function.
+// This is only useful on 386 Windows, but it is safe to use on other platforms.
+//
+// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170
+type CDecl struct{}
+
+const (
+ maxArgs = 15
+ numOfFloats = 8 // arm64 and amd64 both have 8 float registers
+)
+
+type syscall15Args struct {
+ fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr
+ f1, f2, f3, f4, f5, f6, f7, f8 uintptr
+ arm64_r8 uintptr
+}
+
+// SyscallN takes fn, a C function pointer and a list of arguments as uintptr.
+// There is an internal maximum number of arguments that SyscallN can take. It panics
+// when the maximum is exceeded. It returns the result and the libc error code if there is one.
+//
+// NOTE: SyscallN does not properly call functions that have both integer and float parameters.
+// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607
+// for an explanation of why that is.
+//
+// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the
+// stack.
+//
+// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes
+// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect
+// their memory location.
+//
+//go:uintptrescapes
+func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
+ if fn == 0 {
+ panic("purego: fn is nil")
+ }
+ if len(args) > maxArgs {
+ panic("purego: too many arguments to SyscallN")
+ }
+ // add padding so there is no out-of-bounds slicing
+ var tmp [maxArgs]uintptr
+ copy(tmp[:], args)
+ return syscall_syscall15X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14])
+}
diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go
new file mode 100644
index 0000000000000..36ee14e3b732a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build cgo && !(amd64 || arm64)
+
+package purego
+
+import (
+ "github.com/ebitengine/purego/internal/cgo"
+)
+
+var syscall15XABI0 = uintptr(cgo.Syscall15XABI0)
+
+//go:nosplit
+func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+ return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
+}
+
+func NewCallback(_ interface{}) uintptr {
+ panic("purego: NewCallback on Linux is only supported on amd64/arm64")
+}
diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go
new file mode 100644
index 0000000000000..cce171c8f609a
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || (linux && (amd64 || arm64))
+
+package purego
+
+import (
+ "reflect"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+var syscall15XABI0 uintptr
+
+//go:nosplit
+func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+ args := syscall15Args{
+ fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
+ a1, a2, a3, a4, a5, a6, a7, a8,
+ 0,
+ }
+ runtime_cgocall(syscall15XABI0, unsafe.Pointer(&args))
+ return args.a1, args.a2, 0
+}
+
+// NewCallback converts a Go function to a function pointer conforming to the C calling convention.
+// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a
+// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size
+// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated
+// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function
+// provides similar functionality to windows.NewCallback it is distinct.
+func NewCallback(fn interface{}) uintptr {
+ ty := reflect.TypeOf(fn)
+ for i := 0; i < ty.NumIn(); i++ {
+ in := ty.In(i)
+ if !in.AssignableTo(reflect.TypeOf(CDecl{})) {
+ continue
+ }
+ if i != 0 {
+ panic("purego: CDecl must be the first argument")
+ }
+ }
+ return compileCallback(fn)
+}
+
+// maxCb is the maximum number of callbacks
+// only increase this if you have added more to the callbackasm function
+const maxCB = 2000
+
+var cbs struct {
+ lock sync.Mutex
+ numFn int // the number of functions currently in cbs.funcs
+ funcs [maxCB]reflect.Value // the saved callbacks
+}
+
+type callbackArgs struct {
+ index uintptr
+ // args points to the argument block.
+ //
+ // The structure of the arguments goes
+ // float registers followed by the
+ // integer registers followed by the stack.
+ //
+ // This variable is treated as a continuous
+ // block of memory containing all of the arguments
+ // for this callback.
+ args unsafe.Pointer
+ // Below are out-args from callbackWrap
+ result uintptr
+}
+
+func compileCallback(fn interface{}) uintptr {
+ val := reflect.ValueOf(fn)
+ if val.Kind() != reflect.Func {
+ panic("purego: the type must be a function but was not")
+ }
+ if val.IsNil() {
+ panic("purego: function must not be nil")
+ }
+ ty := val.Type()
+ for i := 0; i < ty.NumIn(); i++ {
+ in := ty.In(i)
+ switch in.Kind() {
+ case reflect.Struct:
+ if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) {
+ continue
+ }
+ fallthrough
+ case reflect.Interface, reflect.Func, reflect.Slice,
+ reflect.Chan, reflect.Complex64, reflect.Complex128,
+ reflect.String, reflect.Map, reflect.Invalid:
+ panic("purego: unsupported argument type: " + in.Kind().String())
+ }
+ }
+output:
+ switch {
+ case ty.NumOut() == 1:
+ switch ty.Out(0).Kind() {
+ case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.UnsafePointer:
+ break output
+ }
+ panic("purego: unsupported return type: " + ty.String())
+ case ty.NumOut() > 1:
+ panic("purego: callbacks can only have one return")
+ }
+ cbs.lock.Lock()
+ defer cbs.lock.Unlock()
+ if cbs.numFn >= maxCB {
+ panic("purego: the maximum number of callbacks has been reached")
+ }
+ cbs.funcs[cbs.numFn] = val
+ cbs.numFn++
+ return callbackasmAddr(cbs.numFn - 1)
+}
+
+const ptrSize = unsafe.Sizeof((*int)(nil))
+
+const callbackMaxFrame = 64 * ptrSize
+
+// callbackasm is implemented in zcallback_GOOS_GOARCH.s
+//
+//go:linkname __callbackasm callbackasm
+var __callbackasm byte
+var callbackasmABI0 = uintptr(unsafe.Pointer(&__callbackasm))
+
+// callbackWrap_call allows the calling of the ABIInternal wrapper
+// which is required for runtime.cgocallback without the
+// tag which is only allowed in the runtime.
+// This closure is used inside sys_darwin_GOARCH.s
+var callbackWrap_call = callbackWrap
+
+// callbackWrap is called by assembly code which determines which Go function to call.
+// This function takes the arguments and passes them to the Go function and returns the result.
+func callbackWrap(a *callbackArgs) {
+ cbs.lock.Lock()
+ fn := cbs.funcs[a.index]
+ cbs.lock.Unlock()
+ fnType := fn.Type()
+ args := make([]reflect.Value, fnType.NumIn())
+ frame := (*[callbackMaxFrame]uintptr)(a.args)
+ var floatsN int // floatsN represents the number of float arguments processed
+ var intsN int // intsN represents the number of integer arguments processed
+ // stack points to the index into frame of the current stack element.
+ // The stack begins after the float and integer registers.
+ stack := numOfIntegerRegisters() + numOfFloats
+ for i := range args {
+ var pos int
+ switch fnType.In(i).Kind() {
+ case reflect.Float32, reflect.Float64:
+ if floatsN >= numOfFloats {
+ pos = stack
+ stack++
+ } else {
+ pos = floatsN
+ }
+ floatsN++
+ case reflect.Struct:
+ // This is the CDecl field
+ args[i] = reflect.Zero(fnType.In(i))
+ continue
+ default:
+
+ if intsN >= numOfIntegerRegisters() {
+ pos = stack
+ stack++
+ } else {
+ // the integers begin after the floats in frame
+ pos = intsN + numOfFloats
+ }
+ intsN++
+ }
+ args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem()
+ }
+ ret := fn.Call(args)
+ if len(ret) > 0 {
+ switch k := ret[0].Kind(); k {
+ case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr:
+ a.result = uintptr(ret[0].Uint())
+ case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+ a.result = uintptr(ret[0].Int())
+ case reflect.Bool:
+ if ret[0].Bool() {
+ a.result = 1
+ } else {
+ a.result = 0
+ }
+ case reflect.Pointer:
+ a.result = ret[0].Pointer()
+ case reflect.UnsafePointer:
+ a.result = ret[0].Pointer()
+ default:
+ panic("purego: unsupported kind: " + k.String())
+ }
+ }
+}
+
+// callbackasmAddr returns address of runtime.callbackasm
+// function adjusted by i.
+// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
+// and we want callback to arrive at
+// correspondent call instruction instead of start of
+// runtime.callbackasm.
+// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// R12 is loaded with the callback index. Each entry is two instructions,
+// hence 8 bytes.
+func callbackasmAddr(i int) uintptr {
+ var entrySize int
+ switch runtime.GOARCH {
+ default:
+ panic("purego: unsupported architecture")
+ case "386", "amd64":
+ entrySize = 5
+ case "arm", "arm64":
+ // On ARM and ARM64, each entry is a MOV instruction
+ // followed by a branch instruction
+ entrySize = 8
+ }
+ return callbackasmABI0 + uintptr(i*entrySize)
+}
diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go
new file mode 100644
index 0000000000000..5fbfcabfdc930
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_windows.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+import (
+ "reflect"
+ "syscall"
+)
+
+var syscall15XABI0 uintptr
+
+func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+ r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
+ return r1, r2, uintptr(errno)
+}
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a
+// function with one uintptr-sized result. The function must not have arguments with size larger than the
+// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory
+// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024
+// callbacks can always be created. Although this function is similar to the darwin version it may act
+// differently.
+func NewCallback(fn interface{}) uintptr {
+ isCDecl := false
+ ty := reflect.TypeOf(fn)
+ for i := 0; i < ty.NumIn(); i++ {
+ in := ty.In(i)
+ if !in.AssignableTo(reflect.TypeOf(CDecl{})) {
+ continue
+ }
+ if i != 0 {
+ panic("purego: CDecl must be the first argument")
+ }
+ isCDecl = true
+ }
+ if isCDecl {
+ return syscall.NewCallbackCDecl(fn)
+ }
+ return syscall.NewCallback(fn)
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+ return syscall.GetProcAddress(syscall.Handle(handle), name)
+}
diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s
new file mode 100644
index 0000000000000..6a778bfcad177
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s
@@ -0,0 +1,2014 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || freebsd || linux
+
+// runtime·callbackasm is called by external code to
+// execute Go implemented callback function. It is not
+// called from the start, instead runtime·compilecallback
+// always returns address into runtime·callbackasm offset
+// appropriately so different callbacks start with different
+// CALL instruction in runtime·callbackasm. This determines
+// which Go callback function is executed later on.
+#include "textflag.h"
+
+TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
+ CALL callbackasm1(SB)
diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
new file mode 100644
index 0000000000000..c079b8038e377
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
@@ -0,0 +1,4014 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || freebsd || linux
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0
+ MOVD $0, R12
+ B callbackasm1(SB)
+ MOVD $1, R12
+ B callbackasm1(SB)
+ MOVD $2, R12
+ B callbackasm1(SB)
+ MOVD $3, R12
+ B callbackasm1(SB)
+ MOVD $4, R12
+ B callbackasm1(SB)
+ MOVD $5, R12
+ B callbackasm1(SB)
+ MOVD $6, R12
+ B callbackasm1(SB)
+ MOVD $7, R12
+ B callbackasm1(SB)
+ MOVD $8, R12
+ B callbackasm1(SB)
+ MOVD $9, R12
+ B callbackasm1(SB)
+ MOVD $10, R12
+ B callbackasm1(SB)
+ MOVD $11, R12
+ B callbackasm1(SB)
+ MOVD $12, R12
+ B callbackasm1(SB)
+ MOVD $13, R12
+ B callbackasm1(SB)
+ MOVD $14, R12
+ B callbackasm1(SB)
+ MOVD $15, R12
+ B callbackasm1(SB)
+ MOVD $16, R12
+ B callbackasm1(SB)
+ MOVD $17, R12
+ B callbackasm1(SB)
+ MOVD $18, R12
+ B callbackasm1(SB)
+ MOVD $19, R12
+ B callbackasm1(SB)
+ MOVD $20, R12
+ B callbackasm1(SB)
+ MOVD $21, R12
+ B callbackasm1(SB)
+ MOVD $22, R12
+ B callbackasm1(SB)
+ MOVD $23, R12
+ B callbackasm1(SB)
+ MOVD $24, R12
+ B callbackasm1(SB)
+ MOVD $25, R12
+ B callbackasm1(SB)
+ MOVD $26, R12
+ B callbackasm1(SB)
+ MOVD $27, R12
+ B callbackasm1(SB)
+ MOVD $28, R12
+ B callbackasm1(SB)
+ MOVD $29, R12
+ B callbackasm1(SB)
+ MOVD $30, R12
+ B callbackasm1(SB)
+ MOVD $31, R12
+ B callbackasm1(SB)
+ MOVD $32, R12
+ B callbackasm1(SB)
+ MOVD $33, R12
+ B callbackasm1(SB)
+ MOVD $34, R12
+ B callbackasm1(SB)
+ MOVD $35, R12
+ B callbackasm1(SB)
+ MOVD $36, R12
+ B callbackasm1(SB)
+ MOVD $37, R12
+ B callbackasm1(SB)
+ MOVD $38, R12
+ B callbackasm1(SB)
+ MOVD $39, R12
+ B callbackasm1(SB)
+ MOVD $40, R12
+ B callbackasm1(SB)
+ MOVD $41, R12
+ B callbackasm1(SB)
+ MOVD $42, R12
+ B callbackasm1(SB)
+ MOVD $43, R12
+ B callbackasm1(SB)
+ MOVD $44, R12
+ B callbackasm1(SB)
+ MOVD $45, R12
+ B callbackasm1(SB)
+ MOVD $46, R12
+ B callbackasm1(SB)
+ MOVD $47, R12
+ B callbackasm1(SB)
+ MOVD $48, R12
+ B callbackasm1(SB)
+ MOVD $49, R12
+ B callbackasm1(SB)
+ MOVD $50, R12
+ B callbackasm1(SB)
+ MOVD $51, R12
+ B callbackasm1(SB)
+ MOVD $52, R12
+ B callbackasm1(SB)
+ MOVD $53, R12
+ B callbackasm1(SB)
+ MOVD $54, R12
+ B callbackasm1(SB)
+ MOVD $55, R12
+ B callbackasm1(SB)
+ MOVD $56, R12
+ B callbackasm1(SB)
+ MOVD $57, R12
+ B callbackasm1(SB)
+ MOVD $58, R12
+ B callbackasm1(SB)
+ MOVD $59, R12
+ B callbackasm1(SB)
+ MOVD $60, R12
+ B callbackasm1(SB)
+ MOVD $61, R12
+ B callbackasm1(SB)
+ MOVD $62, R12
+ B callbackasm1(SB)
+ MOVD $63, R12
+ B callbackasm1(SB)
+ MOVD $64, R12
+ B callbackasm1(SB)
+ MOVD $65, R12
+ B callbackasm1(SB)
+ MOVD $66, R12
+ B callbackasm1(SB)
+ MOVD $67, R12
+ B callbackasm1(SB)
+ MOVD $68, R12
+ B callbackasm1(SB)
+ MOVD $69, R12
+ B callbackasm1(SB)
+ MOVD $70, R12
+ B callbackasm1(SB)
+ MOVD $71, R12
+ B callbackasm1(SB)
+ MOVD $72, R12
+ B callbackasm1(SB)
+ MOVD $73, R12
+ B callbackasm1(SB)
+ MOVD $74, R12
+ B callbackasm1(SB)
+ MOVD $75, R12
+ B callbackasm1(SB)
+ MOVD $76, R12
+ B callbackasm1(SB)
+ MOVD $77, R12
+ B callbackasm1(SB)
+ MOVD $78, R12
+ B callbackasm1(SB)
+ MOVD $79, R12
+ B callbackasm1(SB)
+ MOVD $80, R12
+ B callbackasm1(SB)
+ MOVD $81, R12
+ B callbackasm1(SB)
+ MOVD $82, R12
+ B callbackasm1(SB)
+ MOVD $83, R12
+ B callbackasm1(SB)
+ MOVD $84, R12
+ B callbackasm1(SB)
+ MOVD $85, R12
+ B callbackasm1(SB)
+ MOVD $86, R12
+ B callbackasm1(SB)
+ MOVD $87, R12
+ B callbackasm1(SB)
+ MOVD $88, R12
+ B callbackasm1(SB)
+ MOVD $89, R12
+ B callbackasm1(SB)
+ MOVD $90, R12
+ B callbackasm1(SB)
+ MOVD $91, R12
+ B callbackasm1(SB)
+ MOVD $92, R12
+ B callbackasm1(SB)
+ MOVD $93, R12
+ B callbackasm1(SB)
+ MOVD $94, R12
+ B callbackasm1(SB)
+ MOVD $95, R12
+ B callbackasm1(SB)
+ MOVD $96, R12
+ B callbackasm1(SB)
+ MOVD $97, R12
+ B callbackasm1(SB)
+ MOVD $98, R12
+ B callbackasm1(SB)
+ MOVD $99, R12
+ B callbackasm1(SB)
+ MOVD $100, R12
+ B callbackasm1(SB)
+ MOVD $101, R12
+ B callbackasm1(SB)
+ MOVD $102, R12
+ B callbackasm1(SB)
+ MOVD $103, R12
+ B callbackasm1(SB)
+ MOVD $104, R12
+ B callbackasm1(SB)
+ MOVD $105, R12
+ B callbackasm1(SB)
+ MOVD $106, R12
+ B callbackasm1(SB)
+ MOVD $107, R12
+ B callbackasm1(SB)
+ MOVD $108, R12
+ B callbackasm1(SB)
+ MOVD $109, R12
+ B callbackasm1(SB)
+ MOVD $110, R12
+ B callbackasm1(SB)
+ MOVD $111, R12
+ B callbackasm1(SB)
+ MOVD $112, R12
+ B callbackasm1(SB)
+ MOVD $113, R12
+ B callbackasm1(SB)
+ MOVD $114, R12
+ B callbackasm1(SB)
+ MOVD $115, R12
+ B callbackasm1(SB)
+ MOVD $116, R12
+ B callbackasm1(SB)
+ MOVD $117, R12
+ B callbackasm1(SB)
+ MOVD $118, R12
+ B callbackasm1(SB)
+ MOVD $119, R12
+ B callbackasm1(SB)
+ MOVD $120, R12
+ B callbackasm1(SB)
+ MOVD $121, R12
+ B callbackasm1(SB)
+ MOVD $122, R12
+ B callbackasm1(SB)
+ MOVD $123, R12
+ B callbackasm1(SB)
+ MOVD $124, R12
+ B callbackasm1(SB)
+ MOVD $125, R12
+ B callbackasm1(SB)
+ MOVD $126, R12
+ B callbackasm1(SB)
+ MOVD $127, R12
+ B callbackasm1(SB)
+ MOVD $128, R12
+ B callbackasm1(SB)
+ MOVD $129, R12
+ B callbackasm1(SB)
+ MOVD $130, R12
+ B callbackasm1(SB)
+ MOVD $131, R12
+ B callbackasm1(SB)
+ MOVD $132, R12
+ B callbackasm1(SB)
+ MOVD $133, R12
+ B callbackasm1(SB)
+ MOVD $134, R12
+ B callbackasm1(SB)
+ MOVD $135, R12
+ B callbackasm1(SB)
+ MOVD $136, R12
+ B callbackasm1(SB)
+ MOVD $137, R12
+ B callbackasm1(SB)
+ MOVD $138, R12
+ B callbackasm1(SB)
+ MOVD $139, R12
+ B callbackasm1(SB)
+ MOVD $140, R12
+ B callbackasm1(SB)
+ MOVD $141, R12
+ B callbackasm1(SB)
+ MOVD $142, R12
+ B callbackasm1(SB)
+ MOVD $143, R12
+ B callbackasm1(SB)
+ MOVD $144, R12
+ B callbackasm1(SB)
+ MOVD $145, R12
+ B callbackasm1(SB)
+ MOVD $146, R12
+ B callbackasm1(SB)
+ MOVD $147, R12
+ B callbackasm1(SB)
+ MOVD $148, R12
+ B callbackasm1(SB)
+ MOVD $149, R12
+ B callbackasm1(SB)
+ MOVD $150, R12
+ B callbackasm1(SB)
+ MOVD $151, R12
+ B callbackasm1(SB)
+ MOVD $152, R12
+ B callbackasm1(SB)
+ MOVD $153, R12
+ B callbackasm1(SB)
+ MOVD $154, R12
+ B callbackasm1(SB)
+ MOVD $155, R12
+ B callbackasm1(SB)
+ MOVD $156, R12
+ B callbackasm1(SB)
+ MOVD $157, R12
+ B callbackasm1(SB)
+ MOVD $158, R12
+ B callbackasm1(SB)
+ MOVD $159, R12
+ B callbackasm1(SB)
+ MOVD $160, R12
+ B callbackasm1(SB)
+ MOVD $161, R12
+ B callbackasm1(SB)
+ MOVD $162, R12
+ B callbackasm1(SB)
+ MOVD $163, R12
+ B callbackasm1(SB)
+ MOVD $164, R12
+ B callbackasm1(SB)
+ MOVD $165, R12
+ B callbackasm1(SB)
+ MOVD $166, R12
+ B callbackasm1(SB)
+ MOVD $167, R12
+ B callbackasm1(SB)
+ MOVD $168, R12
+ B callbackasm1(SB)
+ MOVD $169, R12
+ B callbackasm1(SB)
+ MOVD $170, R12
+ B callbackasm1(SB)
+ MOVD $171, R12
+ B callbackasm1(SB)
+ MOVD $172, R12
+ B callbackasm1(SB)
+ MOVD $173, R12
+ B callbackasm1(SB)
+ MOVD $174, R12
+ B callbackasm1(SB)
+ MOVD $175, R12
+ B callbackasm1(SB)
+ MOVD $176, R12
+ B callbackasm1(SB)
+ MOVD $177, R12
+ B callbackasm1(SB)
+ MOVD $178, R12
+ B callbackasm1(SB)
+ MOVD $179, R12
+ B callbackasm1(SB)
+ MOVD $180, R12
+ B callbackasm1(SB)
+ MOVD $181, R12
+ B callbackasm1(SB)
+ MOVD $182, R12
+ B callbackasm1(SB)
+ MOVD $183, R12
+ B callbackasm1(SB)
+ MOVD $184, R12
+ B callbackasm1(SB)
+ MOVD $185, R12
+ B callbackasm1(SB)
+ MOVD $186, R12
+ B callbackasm1(SB)
+ MOVD $187, R12
+ B callbackasm1(SB)
+ MOVD $188, R12
+ B callbackasm1(SB)
+ MOVD $189, R12
+ B callbackasm1(SB)
+ MOVD $190, R12
+ B callbackasm1(SB)
+ MOVD $191, R12
+ B callbackasm1(SB)
+ MOVD $192, R12
+ B callbackasm1(SB)
+ MOVD $193, R12
+ B callbackasm1(SB)
+ MOVD $194, R12
+ B callbackasm1(SB)
+ MOVD $195, R12
+ B callbackasm1(SB)
+ MOVD $196, R12
+ B callbackasm1(SB)
+ MOVD $197, R12
+ B callbackasm1(SB)
+ MOVD $198, R12
+ B callbackasm1(SB)
+ MOVD $199, R12
+ B callbackasm1(SB)
+ MOVD $200, R12
+ B callbackasm1(SB)
+ MOVD $201, R12
+ B callbackasm1(SB)
+ MOVD $202, R12
+ B callbackasm1(SB)
+ MOVD $203, R12
+ B callbackasm1(SB)
+ MOVD $204, R12
+ B callbackasm1(SB)
+ MOVD $205, R12
+ B callbackasm1(SB)
+ MOVD $206, R12
+ B callbackasm1(SB)
+ MOVD $207, R12
+ B callbackasm1(SB)
+ MOVD $208, R12
+ B callbackasm1(SB)
+ MOVD $209, R12
+ B callbackasm1(SB)
+ MOVD $210, R12
+ B callbackasm1(SB)
+ MOVD $211, R12
+ B callbackasm1(SB)
+ MOVD $212, R12
+ B callbackasm1(SB)
+ MOVD $213, R12
+ B callbackasm1(SB)
+ MOVD $214, R12
+ B callbackasm1(SB)
+ MOVD $215, R12
+ B callbackasm1(SB)
+ MOVD $216, R12
+ B callbackasm1(SB)
+ MOVD $217, R12
+ B callbackasm1(SB)
+ MOVD $218, R12
+ B callbackasm1(SB)
+ MOVD $219, R12
+ B callbackasm1(SB)
+ MOVD $220, R12
+ B callbackasm1(SB)
+ MOVD $221, R12
+ B callbackasm1(SB)
+ MOVD $222, R12
+ B callbackasm1(SB)
+ MOVD $223, R12
+ B callbackasm1(SB)
+ MOVD $224, R12
+ B callbackasm1(SB)
+ MOVD $225, R12
+ B callbackasm1(SB)
+ MOVD $226, R12
+ B callbackasm1(SB)
+ MOVD $227, R12
+ B callbackasm1(SB)
+ MOVD $228, R12
+ B callbackasm1(SB)
+ MOVD $229, R12
+ B callbackasm1(SB)
+ MOVD $230, R12
+ B callbackasm1(SB)
+ MOVD $231, R12
+ B callbackasm1(SB)
+ MOVD $232, R12
+ B callbackasm1(SB)
+ MOVD $233, R12
+ B callbackasm1(SB)
+ MOVD $234, R12
+ B callbackasm1(SB)
+ MOVD $235, R12
+ B callbackasm1(SB)
+ MOVD $236, R12
+ B callbackasm1(SB)
+ MOVD $237, R12
+ B callbackasm1(SB)
+ MOVD $238, R12
+ B callbackasm1(SB)
+ MOVD $239, R12
+ B callbackasm1(SB)
+ MOVD $240, R12
+ B callbackasm1(SB)
+ MOVD $241, R12
+ B callbackasm1(SB)
+ MOVD $242, R12
+ B callbackasm1(SB)
+ MOVD $243, R12
+ B callbackasm1(SB)
+ MOVD $244, R12
+ B callbackasm1(SB)
+ MOVD $245, R12
+ B callbackasm1(SB)
+ MOVD $246, R12
+ B callbackasm1(SB)
+ MOVD $247, R12
+ B callbackasm1(SB)
+ MOVD $248, R12
+ B callbackasm1(SB)
+ MOVD $249, R12
+ B callbackasm1(SB)
+ MOVD $250, R12
+ B callbackasm1(SB)
+ MOVD $251, R12
+ B callbackasm1(SB)
+ MOVD $252, R12
+ B callbackasm1(SB)
+ MOVD $253, R12
+ B callbackasm1(SB)
+ MOVD $254, R12
+ B callbackasm1(SB)
+ MOVD $255, R12
+ B callbackasm1(SB)
+ MOVD $256, R12
+ B callbackasm1(SB)
+ MOVD $257, R12
+ B callbackasm1(SB)
+ MOVD $258, R12
+ B callbackasm1(SB)
+ MOVD $259, R12
+ B callbackasm1(SB)
+ MOVD $260, R12
+ B callbackasm1(SB)
+ MOVD $261, R12
+ B callbackasm1(SB)
+ MOVD $262, R12
+ B callbackasm1(SB)
+ MOVD $263, R12
+ B callbackasm1(SB)
+ MOVD $264, R12
+ B callbackasm1(SB)
+ MOVD $265, R12
+ B callbackasm1(SB)
+ MOVD $266, R12
+ B callbackasm1(SB)
+ MOVD $267, R12
+ B callbackasm1(SB)
+ MOVD $268, R12
+ B callbackasm1(SB)
+ MOVD $269, R12
+ B callbackasm1(SB)
+ MOVD $270, R12
+ B callbackasm1(SB)
+ MOVD $271, R12
+ B callbackasm1(SB)
+ MOVD $272, R12
+ B callbackasm1(SB)
+ MOVD $273, R12
+ B callbackasm1(SB)
+ MOVD $274, R12
+ B callbackasm1(SB)
+ MOVD $275, R12
+ B callbackasm1(SB)
+ MOVD $276, R12
+ B callbackasm1(SB)
+ MOVD $277, R12
+ B callbackasm1(SB)
+ MOVD $278, R12
+ B callbackasm1(SB)
+ MOVD $279, R12
+ B callbackasm1(SB)
+ MOVD $280, R12
+ B callbackasm1(SB)
+ MOVD $281, R12
+ B callbackasm1(SB)
+ MOVD $282, R12
+ B callbackasm1(SB)
+ MOVD $283, R12
+ B callbackasm1(SB)
+ MOVD $284, R12
+ B callbackasm1(SB)
+ MOVD $285, R12
+ B callbackasm1(SB)
+ MOVD $286, R12
+ B callbackasm1(SB)
+ MOVD $287, R12
+ B callbackasm1(SB)
+ MOVD $288, R12
+ B callbackasm1(SB)
+ MOVD $289, R12
+ B callbackasm1(SB)
+ MOVD $290, R12
+ B callbackasm1(SB)
+ MOVD $291, R12
+ B callbackasm1(SB)
+ MOVD $292, R12
+ B callbackasm1(SB)
+ MOVD $293, R12
+ B callbackasm1(SB)
+ MOVD $294, R12
+ B callbackasm1(SB)
+ MOVD $295, R12
+ B callbackasm1(SB)
+ MOVD $296, R12
+ B callbackasm1(SB)
+ MOVD $297, R12
+ B callbackasm1(SB)
+ MOVD $298, R12
+ B callbackasm1(SB)
+ MOVD $299, R12
+ B callbackasm1(SB)
+ MOVD $300, R12
+ B callbackasm1(SB)
+ MOVD $301, R12
+ B callbackasm1(SB)
+ MOVD $302, R12
+ B callbackasm1(SB)
+ MOVD $303, R12
+ B callbackasm1(SB)
+ MOVD $304, R12
+ B callbackasm1(SB)
+ MOVD $305, R12
+ B callbackasm1(SB)
+ MOVD $306, R12
+ B callbackasm1(SB)
+ MOVD $307, R12
+ B callbackasm1(SB)
+ MOVD $308, R12
+ B callbackasm1(SB)
+ MOVD $309, R12
+ B callbackasm1(SB)
+ MOVD $310, R12
+ B callbackasm1(SB)
+ MOVD $311, R12
+ B callbackasm1(SB)
+ MOVD $312, R12
+ B callbackasm1(SB)
+ MOVD $313, R12
+ B callbackasm1(SB)
+ MOVD $314, R12
+ B callbackasm1(SB)
+ MOVD $315, R12
+ B callbackasm1(SB)
+ MOVD $316, R12
+ B callbackasm1(SB)
+ MOVD $317, R12
+ B callbackasm1(SB)
+ MOVD $318, R12
+ B callbackasm1(SB)
+ MOVD $319, R12
+ B callbackasm1(SB)
+ MOVD $320, R12
+ B callbackasm1(SB)
+ MOVD $321, R12
+ B callbackasm1(SB)
+ MOVD $322, R12
+ B callbackasm1(SB)
+ MOVD $323, R12
+ B callbackasm1(SB)
+ MOVD $324, R12
+ B callbackasm1(SB)
+ MOVD $325, R12
+ B callbackasm1(SB)
+ MOVD $326, R12
+ B callbackasm1(SB)
+ MOVD $327, R12
+ B callbackasm1(SB)
+ MOVD $328, R12
+ B callbackasm1(SB)
+ MOVD $329, R12
+ B callbackasm1(SB)
+ MOVD $330, R12
+ B callbackasm1(SB)
+ MOVD $331, R12
+ B callbackasm1(SB)
+ MOVD $332, R12
+ B callbackasm1(SB)
+ MOVD $333, R12
+ B callbackasm1(SB)
+ MOVD $334, R12
+ B callbackasm1(SB)
+ MOVD $335, R12
+ B callbackasm1(SB)
+ MOVD $336, R12
+ B callbackasm1(SB)
+ MOVD $337, R12
+ B callbackasm1(SB)
+ MOVD $338, R12
+ B callbackasm1(SB)
+ MOVD $339, R12
+ B callbackasm1(SB)
+ MOVD $340, R12
+ B callbackasm1(SB)
+ MOVD $341, R12
+ B callbackasm1(SB)
+ MOVD $342, R12
+ B callbackasm1(SB)
+ MOVD $343, R12
+ B callbackasm1(SB)
+ MOVD $344, R12
+ B callbackasm1(SB)
+ MOVD $345, R12
+ B callbackasm1(SB)
+ MOVD $346, R12
+ B callbackasm1(SB)
+ MOVD $347, R12
+ B callbackasm1(SB)
+ MOVD $348, R12
+ B callbackasm1(SB)
+ MOVD $349, R12
+ B callbackasm1(SB)
+ MOVD $350, R12
+ B callbackasm1(SB)
+ MOVD $351, R12
+ B callbackasm1(SB)
+ MOVD $352, R12
+ B callbackasm1(SB)
+ MOVD $353, R12
+ B callbackasm1(SB)
+ MOVD $354, R12
+ B callbackasm1(SB)
+ MOVD $355, R12
+ B callbackasm1(SB)
+ MOVD $356, R12
+ B callbackasm1(SB)
+ MOVD $357, R12
+ B callbackasm1(SB)
+ MOVD $358, R12
+ B callbackasm1(SB)
+ MOVD $359, R12
+ B callbackasm1(SB)
+ MOVD $360, R12
+ B callbackasm1(SB)
+ MOVD $361, R12
+ B callbackasm1(SB)
+ MOVD $362, R12
+ B callbackasm1(SB)
+ MOVD $363, R12
+ B callbackasm1(SB)
+ MOVD $364, R12
+ B callbackasm1(SB)
+ MOVD $365, R12
+ B callbackasm1(SB)
+ MOVD $366, R12
+ B callbackasm1(SB)
+ MOVD $367, R12
+ B callbackasm1(SB)
+ MOVD $368, R12
+ B callbackasm1(SB)
+ MOVD $369, R12
+ B callbackasm1(SB)
+ MOVD $370, R12
+ B callbackasm1(SB)
+ MOVD $371, R12
+ B callbackasm1(SB)
+ MOVD $372, R12
+ B callbackasm1(SB)
+ MOVD $373, R12
+ B callbackasm1(SB)
+ MOVD $374, R12
+ B callbackasm1(SB)
+ MOVD $375, R12
+ B callbackasm1(SB)
+ MOVD $376, R12
+ B callbackasm1(SB)
+ MOVD $377, R12
+ B callbackasm1(SB)
+ MOVD $378, R12
+ B callbackasm1(SB)
+ MOVD $379, R12
+ B callbackasm1(SB)
+ MOVD $380, R12
+ B callbackasm1(SB)
+ MOVD $381, R12
+ B callbackasm1(SB)
+ MOVD $382, R12
+ B callbackasm1(SB)
+ MOVD $383, R12
+ B callbackasm1(SB)
+ MOVD $384, R12
+ B callbackasm1(SB)
+ MOVD $385, R12
+ B callbackasm1(SB)
+ MOVD $386, R12
+ B callbackasm1(SB)
+ MOVD $387, R12
+ B callbackasm1(SB)
+ MOVD $388, R12
+ B callbackasm1(SB)
+ MOVD $389, R12
+ B callbackasm1(SB)
+ MOVD $390, R12
+ B callbackasm1(SB)
+ MOVD $391, R12
+ B callbackasm1(SB)
+ MOVD $392, R12
+ B callbackasm1(SB)
+ MOVD $393, R12
+ B callbackasm1(SB)
+ MOVD $394, R12
+ B callbackasm1(SB)
+ MOVD $395, R12
+ B callbackasm1(SB)
+ MOVD $396, R12
+ B callbackasm1(SB)
+ MOVD $397, R12
+ B callbackasm1(SB)
+ MOVD $398, R12
+ B callbackasm1(SB)
+ MOVD $399, R12
+ B callbackasm1(SB)
+ MOVD $400, R12
+ B callbackasm1(SB)
+ MOVD $401, R12
+ B callbackasm1(SB)
+ MOVD $402, R12
+ B callbackasm1(SB)
+ MOVD $403, R12
+ B callbackasm1(SB)
+ MOVD $404, R12
+ B callbackasm1(SB)
+ MOVD $405, R12
+ B callbackasm1(SB)
+ MOVD $406, R12
+ B callbackasm1(SB)
+ MOVD $407, R12
+ B callbackasm1(SB)
+ MOVD $408, R12
+ B callbackasm1(SB)
+ MOVD $409, R12
+ B callbackasm1(SB)
+ MOVD $410, R12
+ B callbackasm1(SB)
+ MOVD $411, R12
+ B callbackasm1(SB)
+ MOVD $412, R12
+ B callbackasm1(SB)
+ MOVD $413, R12
+ B callbackasm1(SB)
+ MOVD $414, R12
+ B callbackasm1(SB)
+ MOVD $415, R12
+ B callbackasm1(SB)
+ MOVD $416, R12
+ B callbackasm1(SB)
+ MOVD $417, R12
+ B callbackasm1(SB)
+ MOVD $418, R12
+ B callbackasm1(SB)
+ MOVD $419, R12
+ B callbackasm1(SB)
+ MOVD $420, R12
+ B callbackasm1(SB)
+ MOVD $421, R12
+ B callbackasm1(SB)
+ MOVD $422, R12
+ B callbackasm1(SB)
+ MOVD $423, R12
+ B callbackasm1(SB)
+ MOVD $424, R12
+ B callbackasm1(SB)
+ MOVD $425, R12
+ B callbackasm1(SB)
+ MOVD $426, R12
+ B callbackasm1(SB)
+ MOVD $427, R12
+ B callbackasm1(SB)
+ MOVD $428, R12
+ B callbackasm1(SB)
+ MOVD $429, R12
+ B callbackasm1(SB)
+ MOVD $430, R12
+ B callbackasm1(SB)
+ MOVD $431, R12
+ B callbackasm1(SB)
+ MOVD $432, R12
+ B callbackasm1(SB)
+ MOVD $433, R12
+ B callbackasm1(SB)
+ MOVD $434, R12
+ B callbackasm1(SB)
+ MOVD $435, R12
+ B callbackasm1(SB)
+ MOVD $436, R12
+ B callbackasm1(SB)
+ MOVD $437, R12
+ B callbackasm1(SB)
+ MOVD $438, R12
+ B callbackasm1(SB)
+ MOVD $439, R12
+ B callbackasm1(SB)
+ MOVD $440, R12
+ B callbackasm1(SB)
+ MOVD $441, R12
+ B callbackasm1(SB)
+ MOVD $442, R12
+ B callbackasm1(SB)
+ MOVD $443, R12
+ B callbackasm1(SB)
+ MOVD $444, R12
+ B callbackasm1(SB)
+ MOVD $445, R12
+ B callbackasm1(SB)
+ MOVD $446, R12
+ B callbackasm1(SB)
+ MOVD $447, R12
+ B callbackasm1(SB)
+ MOVD $448, R12
+ B callbackasm1(SB)
+ MOVD $449, R12
+ B callbackasm1(SB)
+ MOVD $450, R12
+ B callbackasm1(SB)
+ MOVD $451, R12
+ B callbackasm1(SB)
+ MOVD $452, R12
+ B callbackasm1(SB)
+ MOVD $453, R12
+ B callbackasm1(SB)
+ MOVD $454, R12
+ B callbackasm1(SB)
+ MOVD $455, R12
+ B callbackasm1(SB)
+ MOVD $456, R12
+ B callbackasm1(SB)
+ MOVD $457, R12
+ B callbackasm1(SB)
+ MOVD $458, R12
+ B callbackasm1(SB)
+ MOVD $459, R12
+ B callbackasm1(SB)
+ MOVD $460, R12
+ B callbackasm1(SB)
+ MOVD $461, R12
+ B callbackasm1(SB)
+ MOVD $462, R12
+ B callbackasm1(SB)
+ MOVD $463, R12
+ B callbackasm1(SB)
+ MOVD $464, R12
+ B callbackasm1(SB)
+ MOVD $465, R12
+ B callbackasm1(SB)
+ MOVD $466, R12
+ B callbackasm1(SB)
+ MOVD $467, R12
+ B callbackasm1(SB)
+ MOVD $468, R12
+ B callbackasm1(SB)
+ MOVD $469, R12
+ B callbackasm1(SB)
+ MOVD $470, R12
+ B callbackasm1(SB)
+ MOVD $471, R12
+ B callbackasm1(SB)
+ MOVD $472, R12
+ B callbackasm1(SB)
+ MOVD $473, R12
+ B callbackasm1(SB)
+ MOVD $474, R12
+ B callbackasm1(SB)
+ MOVD $475, R12
+ B callbackasm1(SB)
+ MOVD $476, R12
+ B callbackasm1(SB)
+ MOVD $477, R12
+ B callbackasm1(SB)
+ MOVD $478, R12
+ B callbackasm1(SB)
+ MOVD $479, R12
+ B callbackasm1(SB)
+ MOVD $480, R12
+ B callbackasm1(SB)
+ MOVD $481, R12
+ B callbackasm1(SB)
+ MOVD $482, R12
+ B callbackasm1(SB)
+ MOVD $483, R12
+ B callbackasm1(SB)
+ MOVD $484, R12
+ B callbackasm1(SB)
+ MOVD $485, R12
+ B callbackasm1(SB)
+ MOVD $486, R12
+ B callbackasm1(SB)
+ MOVD $487, R12
+ B callbackasm1(SB)
+ MOVD $488, R12
+ B callbackasm1(SB)
+ MOVD $489, R12
+ B callbackasm1(SB)
+ MOVD $490, R12
+ B callbackasm1(SB)
+ MOVD $491, R12
+ B callbackasm1(SB)
+ MOVD $492, R12
+ B callbackasm1(SB)
+ MOVD $493, R12
+ B callbackasm1(SB)
+ MOVD $494, R12
+ B callbackasm1(SB)
+ MOVD $495, R12
+ B callbackasm1(SB)
+ MOVD $496, R12
+ B callbackasm1(SB)
+ MOVD $497, R12
+ B callbackasm1(SB)
+ MOVD $498, R12
+ B callbackasm1(SB)
+ MOVD $499, R12
+ B callbackasm1(SB)
+ MOVD $500, R12
+ B callbackasm1(SB)
+ MOVD $501, R12
+ B callbackasm1(SB)
+ MOVD $502, R12
+ B callbackasm1(SB)
+ MOVD $503, R12
+ B callbackasm1(SB)
+ MOVD $504, R12
+ B callbackasm1(SB)
+ MOVD $505, R12
+ B callbackasm1(SB)
+ MOVD $506, R12
+ B callbackasm1(SB)
+ MOVD $507, R12
+ B callbackasm1(SB)
+ MOVD $508, R12
+ B callbackasm1(SB)
+ MOVD $509, R12
+ B callbackasm1(SB)
+ MOVD $510, R12
+ B callbackasm1(SB)
+ MOVD $511, R12
+ B callbackasm1(SB)
+ MOVD $512, R12
+ B callbackasm1(SB)
+ MOVD $513, R12
+ B callbackasm1(SB)
+ MOVD $514, R12
+ B callbackasm1(SB)
+ MOVD $515, R12
+ B callbackasm1(SB)
+ MOVD $516, R12
+ B callbackasm1(SB)
+ MOVD $517, R12
+ B callbackasm1(SB)
+ MOVD $518, R12
+ B callbackasm1(SB)
+ MOVD $519, R12
+ B callbackasm1(SB)
+ MOVD $520, R12
+ B callbackasm1(SB)
+ MOVD $521, R12
+ B callbackasm1(SB)
+ MOVD $522, R12
+ B callbackasm1(SB)
+ MOVD $523, R12
+ B callbackasm1(SB)
+ MOVD $524, R12
+ B callbackasm1(SB)
+ MOVD $525, R12
+ B callbackasm1(SB)
+ MOVD $526, R12
+ B callbackasm1(SB)
+ MOVD $527, R12
+ B callbackasm1(SB)
+ MOVD $528, R12
+ B callbackasm1(SB)
+ MOVD $529, R12
+ B callbackasm1(SB)
+ MOVD $530, R12
+ B callbackasm1(SB)
+ MOVD $531, R12
+ B callbackasm1(SB)
+ MOVD $532, R12
+ B callbackasm1(SB)
+ MOVD $533, R12
+ B callbackasm1(SB)
+ MOVD $534, R12
+ B callbackasm1(SB)
+ MOVD $535, R12
+ B callbackasm1(SB)
+ MOVD $536, R12
+ B callbackasm1(SB)
+ MOVD $537, R12
+ B callbackasm1(SB)
+ MOVD $538, R12
+ B callbackasm1(SB)
+ MOVD $539, R12
+ B callbackasm1(SB)
+ MOVD $540, R12
+ B callbackasm1(SB)
+ MOVD $541, R12
+ B callbackasm1(SB)
+ MOVD $542, R12
+ B callbackasm1(SB)
+ MOVD $543, R12
+ B callbackasm1(SB)
+ MOVD $544, R12
+ B callbackasm1(SB)
+ MOVD $545, R12
+ B callbackasm1(SB)
+ MOVD $546, R12
+ B callbackasm1(SB)
+ MOVD $547, R12
+ B callbackasm1(SB)
+ MOVD $548, R12
+ B callbackasm1(SB)
+ MOVD $549, R12
+ B callbackasm1(SB)
+ MOVD $550, R12
+ B callbackasm1(SB)
+ MOVD $551, R12
+ B callbackasm1(SB)
+ MOVD $552, R12
+ B callbackasm1(SB)
+ MOVD $553, R12
+ B callbackasm1(SB)
+ MOVD $554, R12
+ B callbackasm1(SB)
+ MOVD $555, R12
+ B callbackasm1(SB)
+ MOVD $556, R12
+ B callbackasm1(SB)
+ MOVD $557, R12
+ B callbackasm1(SB)
+ MOVD $558, R12
+ B callbackasm1(SB)
+ MOVD $559, R12
+ B callbackasm1(SB)
+ MOVD $560, R12
+ B callbackasm1(SB)
+ MOVD $561, R12
+ B callbackasm1(SB)
+ MOVD $562, R12
+ B callbackasm1(SB)
+ MOVD $563, R12
+ B callbackasm1(SB)
+ MOVD $564, R12
+ B callbackasm1(SB)
+ MOVD $565, R12
+ B callbackasm1(SB)
+ MOVD $566, R12
+ B callbackasm1(SB)
+ MOVD $567, R12
+ B callbackasm1(SB)
+ MOVD $568, R12
+ B callbackasm1(SB)
+ MOVD $569, R12
+ B callbackasm1(SB)
+ MOVD $570, R12
+ B callbackasm1(SB)
+ MOVD $571, R12
+ B callbackasm1(SB)
+ MOVD $572, R12
+ B callbackasm1(SB)
+ MOVD $573, R12
+ B callbackasm1(SB)
+ MOVD $574, R12
+ B callbackasm1(SB)
+ MOVD $575, R12
+ B callbackasm1(SB)
+ MOVD $576, R12
+ B callbackasm1(SB)
+ MOVD $577, R12
+ B callbackasm1(SB)
+ MOVD $578, R12
+ B callbackasm1(SB)
+ MOVD $579, R12
+ B callbackasm1(SB)
+ MOVD $580, R12
+ B callbackasm1(SB)
+ MOVD $581, R12
+ B callbackasm1(SB)
+ MOVD $582, R12
+ B callbackasm1(SB)
+ MOVD $583, R12
+ B callbackasm1(SB)
+ MOVD $584, R12
+ B callbackasm1(SB)
+ MOVD $585, R12
+ B callbackasm1(SB)
+ MOVD $586, R12
+ B callbackasm1(SB)
+ MOVD $587, R12
+ B callbackasm1(SB)
+ MOVD $588, R12
+ B callbackasm1(SB)
+ MOVD $589, R12
+ B callbackasm1(SB)
+ MOVD $590, R12
+ B callbackasm1(SB)
+ MOVD $591, R12
+ B callbackasm1(SB)
+ MOVD $592, R12
+ B callbackasm1(SB)
+ MOVD $593, R12
+ B callbackasm1(SB)
+ MOVD $594, R12
+ B callbackasm1(SB)
+ MOVD $595, R12
+ B callbackasm1(SB)
+ MOVD $596, R12
+ B callbackasm1(SB)
+ MOVD $597, R12
+ B callbackasm1(SB)
+ MOVD $598, R12
+ B callbackasm1(SB)
+ MOVD $599, R12
+ B callbackasm1(SB)
+ MOVD $600, R12
+ B callbackasm1(SB)
+ MOVD $601, R12
+ B callbackasm1(SB)
+ MOVD $602, R12
+ B callbackasm1(SB)
+ MOVD $603, R12
+ B callbackasm1(SB)
+ MOVD $604, R12
+ B callbackasm1(SB)
+ MOVD $605, R12
+ B callbackasm1(SB)
+ MOVD $606, R12
+ B callbackasm1(SB)
+ MOVD $607, R12
+ B callbackasm1(SB)
+ MOVD $608, R12
+ B callbackasm1(SB)
+ MOVD $609, R12
+ B callbackasm1(SB)
+ MOVD $610, R12
+ B callbackasm1(SB)
+ MOVD $611, R12
+ B callbackasm1(SB)
+ MOVD $612, R12
+ B callbackasm1(SB)
+ MOVD $613, R12
+ B callbackasm1(SB)
+ MOVD $614, R12
+ B callbackasm1(SB)
+ MOVD $615, R12
+ B callbackasm1(SB)
+ MOVD $616, R12
+ B callbackasm1(SB)
+ MOVD $617, R12
+ B callbackasm1(SB)
+ MOVD $618, R12
+ B callbackasm1(SB)
+ MOVD $619, R12
+ B callbackasm1(SB)
+ MOVD $620, R12
+ B callbackasm1(SB)
+ MOVD $621, R12
+ B callbackasm1(SB)
+ MOVD $622, R12
+ B callbackasm1(SB)
+ MOVD $623, R12
+ B callbackasm1(SB)
+ MOVD $624, R12
+ B callbackasm1(SB)
+ MOVD $625, R12
+ B callbackasm1(SB)
+ MOVD $626, R12
+ B callbackasm1(SB)
+ MOVD $627, R12
+ B callbackasm1(SB)
+ MOVD $628, R12
+ B callbackasm1(SB)
+ MOVD $629, R12
+ B callbackasm1(SB)
+ MOVD $630, R12
+ B callbackasm1(SB)
+ MOVD $631, R12
+ B callbackasm1(SB)
+ MOVD $632, R12
+ B callbackasm1(SB)
+ MOVD $633, R12
+ B callbackasm1(SB)
+ MOVD $634, R12
+ B callbackasm1(SB)
+ MOVD $635, R12
+ B callbackasm1(SB)
+ MOVD $636, R12
+ B callbackasm1(SB)
+ MOVD $637, R12
+ B callbackasm1(SB)
+ MOVD $638, R12
+ B callbackasm1(SB)
+ MOVD $639, R12
+ B callbackasm1(SB)
+ MOVD $640, R12
+ B callbackasm1(SB)
+ MOVD $641, R12
+ B callbackasm1(SB)
+ MOVD $642, R12
+ B callbackasm1(SB)
+ MOVD $643, R12
+ B callbackasm1(SB)
+ MOVD $644, R12
+ B callbackasm1(SB)
+ MOVD $645, R12
+ B callbackasm1(SB)
+ MOVD $646, R12
+ B callbackasm1(SB)
+ MOVD $647, R12
+ B callbackasm1(SB)
+ MOVD $648, R12
+ B callbackasm1(SB)
+ MOVD $649, R12
+ B callbackasm1(SB)
+ MOVD $650, R12
+ B callbackasm1(SB)
+ MOVD $651, R12
+ B callbackasm1(SB)
+ MOVD $652, R12
+ B callbackasm1(SB)
+ MOVD $653, R12
+ B callbackasm1(SB)
+ MOVD $654, R12
+ B callbackasm1(SB)
+ MOVD $655, R12
+ B callbackasm1(SB)
+ MOVD $656, R12
+ B callbackasm1(SB)
+ MOVD $657, R12
+ B callbackasm1(SB)
+ MOVD $658, R12
+ B callbackasm1(SB)
+ MOVD $659, R12
+ B callbackasm1(SB)
+ MOVD $660, R12
+ B callbackasm1(SB)
+ MOVD $661, R12
+ B callbackasm1(SB)
+ MOVD $662, R12
+ B callbackasm1(SB)
+ MOVD $663, R12
+ B callbackasm1(SB)
+ MOVD $664, R12
+ B callbackasm1(SB)
+ MOVD $665, R12
+ B callbackasm1(SB)
+ MOVD $666, R12
+ B callbackasm1(SB)
+ MOVD $667, R12
+ B callbackasm1(SB)
+ MOVD $668, R12
+ B callbackasm1(SB)
+ MOVD $669, R12
+ B callbackasm1(SB)
+ MOVD $670, R12
+ B callbackasm1(SB)
+ MOVD $671, R12
+ B callbackasm1(SB)
+ MOVD $672, R12
+ B callbackasm1(SB)
+ MOVD $673, R12
+ B callbackasm1(SB)
+ MOVD $674, R12
+ B callbackasm1(SB)
+ MOVD $675, R12
+ B callbackasm1(SB)
+ MOVD $676, R12
+ B callbackasm1(SB)
+ MOVD $677, R12
+ B callbackasm1(SB)
+ MOVD $678, R12
+ B callbackasm1(SB)
+ MOVD $679, R12
+ B callbackasm1(SB)
+ MOVD $680, R12
+ B callbackasm1(SB)
+ MOVD $681, R12
+ B callbackasm1(SB)
+ MOVD $682, R12
+ B callbackasm1(SB)
+ MOVD $683, R12
+ B callbackasm1(SB)
+ MOVD $684, R12
+ B callbackasm1(SB)
+ MOVD $685, R12
+ B callbackasm1(SB)
+ MOVD $686, R12
+ B callbackasm1(SB)
+ MOVD $687, R12
+ B callbackasm1(SB)
+ MOVD $688, R12
+ B callbackasm1(SB)
+ MOVD $689, R12
+ B callbackasm1(SB)
+ MOVD $690, R12
+ B callbackasm1(SB)
+ MOVD $691, R12
+ B callbackasm1(SB)
+ MOVD $692, R12
+ B callbackasm1(SB)
+ MOVD $693, R12
+ B callbackasm1(SB)
+ MOVD $694, R12
+ B callbackasm1(SB)
+ MOVD $695, R12
+ B callbackasm1(SB)
+ MOVD $696, R12
+ B callbackasm1(SB)
+ MOVD $697, R12
+ B callbackasm1(SB)
+ MOVD $698, R12
+ B callbackasm1(SB)
+ MOVD $699, R12
+ B callbackasm1(SB)
+ MOVD $700, R12
+ B callbackasm1(SB)
+ MOVD $701, R12
+ B callbackasm1(SB)
+ MOVD $702, R12
+ B callbackasm1(SB)
+ MOVD $703, R12
+ B callbackasm1(SB)
+ MOVD $704, R12
+ B callbackasm1(SB)
+ MOVD $705, R12
+ B callbackasm1(SB)
+ MOVD $706, R12
+ B callbackasm1(SB)
+ MOVD $707, R12
+ B callbackasm1(SB)
+ MOVD $708, R12
+ B callbackasm1(SB)
+ MOVD $709, R12
+ B callbackasm1(SB)
+ MOVD $710, R12
+ B callbackasm1(SB)
+ MOVD $711, R12
+ B callbackasm1(SB)
+ MOVD $712, R12
+ B callbackasm1(SB)
+ MOVD $713, R12
+ B callbackasm1(SB)
+ MOVD $714, R12
+ B callbackasm1(SB)
+ MOVD $715, R12
+ B callbackasm1(SB)
+ MOVD $716, R12
+ B callbackasm1(SB)
+ MOVD $717, R12
+ B callbackasm1(SB)
+ MOVD $718, R12
+ B callbackasm1(SB)
+ MOVD $719, R12
+ B callbackasm1(SB)
+ MOVD $720, R12
+ B callbackasm1(SB)
+ MOVD $721, R12
+ B callbackasm1(SB)
+ MOVD $722, R12
+ B callbackasm1(SB)
+ MOVD $723, R12
+ B callbackasm1(SB)
+ MOVD $724, R12
+ B callbackasm1(SB)
+ MOVD $725, R12
+ B callbackasm1(SB)
+ MOVD $726, R12
+ B callbackasm1(SB)
+ MOVD $727, R12
+ B callbackasm1(SB)
+ MOVD $728, R12
+ B callbackasm1(SB)
+ MOVD $729, R12
+ B callbackasm1(SB)
+ MOVD $730, R12
+ B callbackasm1(SB)
+ MOVD $731, R12
+ B callbackasm1(SB)
+ MOVD $732, R12
+ B callbackasm1(SB)
+ MOVD $733, R12
+ B callbackasm1(SB)
+ MOVD $734, R12
+ B callbackasm1(SB)
+ MOVD $735, R12
+ B callbackasm1(SB)
+ MOVD $736, R12
+ B callbackasm1(SB)
+ MOVD $737, R12
+ B callbackasm1(SB)
+ MOVD $738, R12
+ B callbackasm1(SB)
+ MOVD $739, R12
+ B callbackasm1(SB)
+ MOVD $740, R12
+ B callbackasm1(SB)
+ MOVD $741, R12
+ B callbackasm1(SB)
+ MOVD $742, R12
+ B callbackasm1(SB)
+ MOVD $743, R12
+ B callbackasm1(SB)
+ MOVD $744, R12
+ B callbackasm1(SB)
+ MOVD $745, R12
+ B callbackasm1(SB)
+ MOVD $746, R12
+ B callbackasm1(SB)
+ MOVD $747, R12
+ B callbackasm1(SB)
+ MOVD $748, R12
+ B callbackasm1(SB)
+ MOVD $749, R12
+ B callbackasm1(SB)
+ MOVD $750, R12
+ B callbackasm1(SB)
+ MOVD $751, R12
+ B callbackasm1(SB)
+ MOVD $752, R12
+ B callbackasm1(SB)
+ MOVD $753, R12
+ B callbackasm1(SB)
+ MOVD $754, R12
+ B callbackasm1(SB)
+ MOVD $755, R12
+ B callbackasm1(SB)
+ MOVD $756, R12
+ B callbackasm1(SB)
+ MOVD $757, R12
+ B callbackasm1(SB)
+ MOVD $758, R12
+ B callbackasm1(SB)
+ MOVD $759, R12
+ B callbackasm1(SB)
+ MOVD $760, R12
+ B callbackasm1(SB)
+ MOVD $761, R12
+ B callbackasm1(SB)
+ MOVD $762, R12
+ B callbackasm1(SB)
+ MOVD $763, R12
+ B callbackasm1(SB)
+ MOVD $764, R12
+ B callbackasm1(SB)
+ MOVD $765, R12
+ B callbackasm1(SB)
+ MOVD $766, R12
+ B callbackasm1(SB)
+ MOVD $767, R12
+ B callbackasm1(SB)
+ MOVD $768, R12
+ B callbackasm1(SB)
+ MOVD $769, R12
+ B callbackasm1(SB)
+ MOVD $770, R12
+ B callbackasm1(SB)
+ MOVD $771, R12
+ B callbackasm1(SB)
+ MOVD $772, R12
+ B callbackasm1(SB)
+ MOVD $773, R12
+ B callbackasm1(SB)
+ MOVD $774, R12
+ B callbackasm1(SB)
+ MOVD $775, R12
+ B callbackasm1(SB)
+ MOVD $776, R12
+ B callbackasm1(SB)
+ MOVD $777, R12
+ B callbackasm1(SB)
+ MOVD $778, R12
+ B callbackasm1(SB)
+ MOVD $779, R12
+ B callbackasm1(SB)
+ MOVD $780, R12
+ B callbackasm1(SB)
+ MOVD $781, R12
+ B callbackasm1(SB)
+ MOVD $782, R12
+ B callbackasm1(SB)
+ MOVD $783, R12
+ B callbackasm1(SB)
+ MOVD $784, R12
+ B callbackasm1(SB)
+ MOVD $785, R12
+ B callbackasm1(SB)
+ MOVD $786, R12
+ B callbackasm1(SB)
+ MOVD $787, R12
+ B callbackasm1(SB)
+ MOVD $788, R12
+ B callbackasm1(SB)
+ MOVD $789, R12
+ B callbackasm1(SB)
+ MOVD $790, R12
+ B callbackasm1(SB)
+ MOVD $791, R12
+ B callbackasm1(SB)
+ MOVD $792, R12
+ B callbackasm1(SB)
+ MOVD $793, R12
+ B callbackasm1(SB)
+ MOVD $794, R12
+ B callbackasm1(SB)
+ MOVD $795, R12
+ B callbackasm1(SB)
+ MOVD $796, R12
+ B callbackasm1(SB)
+ MOVD $797, R12
+ B callbackasm1(SB)
+ MOVD $798, R12
+ B callbackasm1(SB)
+ MOVD $799, R12
+ B callbackasm1(SB)
+ MOVD $800, R12
+ B callbackasm1(SB)
+ MOVD $801, R12
+ B callbackasm1(SB)
+ MOVD $802, R12
+ B callbackasm1(SB)
+ MOVD $803, R12
+ B callbackasm1(SB)
+ MOVD $804, R12
+ B callbackasm1(SB)
+ MOVD $805, R12
+ B callbackasm1(SB)
+ MOVD $806, R12
+ B callbackasm1(SB)
+ MOVD $807, R12
+ B callbackasm1(SB)
+ MOVD $808, R12
+ B callbackasm1(SB)
+ MOVD $809, R12
+ B callbackasm1(SB)
+ MOVD $810, R12
+ B callbackasm1(SB)
+ MOVD $811, R12
+ B callbackasm1(SB)
+ MOVD $812, R12
+ B callbackasm1(SB)
+ MOVD $813, R12
+ B callbackasm1(SB)
+ MOVD $814, R12
+ B callbackasm1(SB)
+ MOVD $815, R12
+ B callbackasm1(SB)
+ MOVD $816, R12
+ B callbackasm1(SB)
+ MOVD $817, R12
+ B callbackasm1(SB)
+ MOVD $818, R12
+ B callbackasm1(SB)
+ MOVD $819, R12
+ B callbackasm1(SB)
+ MOVD $820, R12
+ B callbackasm1(SB)
+ MOVD $821, R12
+ B callbackasm1(SB)
+ MOVD $822, R12
+ B callbackasm1(SB)
+ MOVD $823, R12
+ B callbackasm1(SB)
+ MOVD $824, R12
+ B callbackasm1(SB)
+ MOVD $825, R12
+ B callbackasm1(SB)
+ MOVD $826, R12
+ B callbackasm1(SB)
+ MOVD $827, R12
+ B callbackasm1(SB)
+ MOVD $828, R12
+ B callbackasm1(SB)
+ MOVD $829, R12
+ B callbackasm1(SB)
+ MOVD $830, R12
+ B callbackasm1(SB)
+ MOVD $831, R12
+ B callbackasm1(SB)
+ MOVD $832, R12
+ B callbackasm1(SB)
+ MOVD $833, R12
+ B callbackasm1(SB)
+ MOVD $834, R12
+ B callbackasm1(SB)
+ MOVD $835, R12
+ B callbackasm1(SB)
+ MOVD $836, R12
+ B callbackasm1(SB)
+ MOVD $837, R12
+ B callbackasm1(SB)
+ MOVD $838, R12
+ B callbackasm1(SB)
+ MOVD $839, R12
+ B callbackasm1(SB)
+ MOVD $840, R12
+ B callbackasm1(SB)
+ MOVD $841, R12
+ B callbackasm1(SB)
+ MOVD $842, R12
+ B callbackasm1(SB)
+ MOVD $843, R12
+ B callbackasm1(SB)
+ MOVD $844, R12
+ B callbackasm1(SB)
+ MOVD $845, R12
+ B callbackasm1(SB)
+ MOVD $846, R12
+ B callbackasm1(SB)
+ MOVD $847, R12
+ B callbackasm1(SB)
+ MOVD $848, R12
+ B callbackasm1(SB)
+ MOVD $849, R12
+ B callbackasm1(SB)
+ MOVD $850, R12
+ B callbackasm1(SB)
+ MOVD $851, R12
+ B callbackasm1(SB)
+ MOVD $852, R12
+ B callbackasm1(SB)
+ MOVD $853, R12
+ B callbackasm1(SB)
+ MOVD $854, R12
+ B callbackasm1(SB)
+ MOVD $855, R12
+ B callbackasm1(SB)
+ MOVD $856, R12
+ B callbackasm1(SB)
+ MOVD $857, R12
+ B callbackasm1(SB)
+ MOVD $858, R12
+ B callbackasm1(SB)
+ MOVD $859, R12
+ B callbackasm1(SB)
+ MOVD $860, R12
+ B callbackasm1(SB)
+ MOVD $861, R12
+ B callbackasm1(SB)
+ MOVD $862, R12
+ B callbackasm1(SB)
+ MOVD $863, R12
+ B callbackasm1(SB)
+ MOVD $864, R12
+ B callbackasm1(SB)
+ MOVD $865, R12
+ B callbackasm1(SB)
+ MOVD $866, R12
+ B callbackasm1(SB)
+ MOVD $867, R12
+ B callbackasm1(SB)
+ MOVD $868, R12
+ B callbackasm1(SB)
+ MOVD $869, R12
+ B callbackasm1(SB)
+ MOVD $870, R12
+ B callbackasm1(SB)
+ MOVD $871, R12
+ B callbackasm1(SB)
+ MOVD $872, R12
+ B callbackasm1(SB)
+ MOVD $873, R12
+ B callbackasm1(SB)
+ MOVD $874, R12
+ B callbackasm1(SB)
+ MOVD $875, R12
+ B callbackasm1(SB)
+ MOVD $876, R12
+ B callbackasm1(SB)
+ MOVD $877, R12
+ B callbackasm1(SB)
+ MOVD $878, R12
+ B callbackasm1(SB)
+ MOVD $879, R12
+ B callbackasm1(SB)
+ MOVD $880, R12
+ B callbackasm1(SB)
+ MOVD $881, R12
+ B callbackasm1(SB)
+ MOVD $882, R12
+ B callbackasm1(SB)
+ MOVD $883, R12
+ B callbackasm1(SB)
+ MOVD $884, R12
+ B callbackasm1(SB)
+ MOVD $885, R12
+ B callbackasm1(SB)
+ MOVD $886, R12
+ B callbackasm1(SB)
+ MOVD $887, R12
+ B callbackasm1(SB)
+ MOVD $888, R12
+ B callbackasm1(SB)
+ MOVD $889, R12
+ B callbackasm1(SB)
+ MOVD $890, R12
+ B callbackasm1(SB)
+ MOVD $891, R12
+ B callbackasm1(SB)
+ MOVD $892, R12
+ B callbackasm1(SB)
+ MOVD $893, R12
+ B callbackasm1(SB)
+ MOVD $894, R12
+ B callbackasm1(SB)
+ MOVD $895, R12
+ B callbackasm1(SB)
+ MOVD $896, R12
+ B callbackasm1(SB)
+ MOVD $897, R12
+ B callbackasm1(SB)
+ MOVD $898, R12
+ B callbackasm1(SB)
+ MOVD $899, R12
+ B callbackasm1(SB)
+ MOVD $900, R12
+ B callbackasm1(SB)
+ MOVD $901, R12
+ B callbackasm1(SB)
+ MOVD $902, R12
+ B callbackasm1(SB)
+ MOVD $903, R12
+ B callbackasm1(SB)
+ MOVD $904, R12
+ B callbackasm1(SB)
+ MOVD $905, R12
+ B callbackasm1(SB)
+ MOVD $906, R12
+ B callbackasm1(SB)
+ MOVD $907, R12
+ B callbackasm1(SB)
+ MOVD $908, R12
+ B callbackasm1(SB)
+ MOVD $909, R12
+ B callbackasm1(SB)
+ MOVD $910, R12
+ B callbackasm1(SB)
+ MOVD $911, R12
+ B callbackasm1(SB)
+ MOVD $912, R12
+ B callbackasm1(SB)
+ MOVD $913, R12
+ B callbackasm1(SB)
+ MOVD $914, R12
+ B callbackasm1(SB)
+ MOVD $915, R12
+ B callbackasm1(SB)
+ MOVD $916, R12
+ B callbackasm1(SB)
+ MOVD $917, R12
+ B callbackasm1(SB)
+ MOVD $918, R12
+ B callbackasm1(SB)
+ MOVD $919, R12
+ B callbackasm1(SB)
+ MOVD $920, R12
+ B callbackasm1(SB)
+ MOVD $921, R12
+ B callbackasm1(SB)
+ MOVD $922, R12
+ B callbackasm1(SB)
+ MOVD $923, R12
+ B callbackasm1(SB)
+ MOVD $924, R12
+ B callbackasm1(SB)
+ MOVD $925, R12
+ B callbackasm1(SB)
+ MOVD $926, R12
+ B callbackasm1(SB)
+ MOVD $927, R12
+ B callbackasm1(SB)
+ MOVD $928, R12
+ B callbackasm1(SB)
+ MOVD $929, R12
+ B callbackasm1(SB)
+ MOVD $930, R12
+ B callbackasm1(SB)
+ MOVD $931, R12
+ B callbackasm1(SB)
+ MOVD $932, R12
+ B callbackasm1(SB)
+ MOVD $933, R12
+ B callbackasm1(SB)
+ MOVD $934, R12
+ B callbackasm1(SB)
+ MOVD $935, R12
+ B callbackasm1(SB)
+ MOVD $936, R12
+ B callbackasm1(SB)
+ MOVD $937, R12
+ B callbackasm1(SB)
+ MOVD $938, R12
+ B callbackasm1(SB)
+ MOVD $939, R12
+ B callbackasm1(SB)
+ MOVD $940, R12
+ B callbackasm1(SB)
+ MOVD $941, R12
+ B callbackasm1(SB)
+ MOVD $942, R12
+ B callbackasm1(SB)
+ MOVD $943, R12
+ B callbackasm1(SB)
+ MOVD $944, R12
+ B callbackasm1(SB)
+ MOVD $945, R12
+ B callbackasm1(SB)
+ MOVD $946, R12
+ B callbackasm1(SB)
+ MOVD $947, R12
+ B callbackasm1(SB)
+ MOVD $948, R12
+ B callbackasm1(SB)
+ MOVD $949, R12
+ B callbackasm1(SB)
+ MOVD $950, R12
+ B callbackasm1(SB)
+ MOVD $951, R12
+ B callbackasm1(SB)
+ MOVD $952, R12
+ B callbackasm1(SB)
+ MOVD $953, R12
+ B callbackasm1(SB)
+ MOVD $954, R12
+ B callbackasm1(SB)
+ MOVD $955, R12
+ B callbackasm1(SB)
+ MOVD $956, R12
+ B callbackasm1(SB)
+ MOVD $957, R12
+ B callbackasm1(SB)
+ MOVD $958, R12
+ B callbackasm1(SB)
+ MOVD $959, R12
+ B callbackasm1(SB)
+ MOVD $960, R12
+ B callbackasm1(SB)
+ MOVD $961, R12
+ B callbackasm1(SB)
+ MOVD $962, R12
+ B callbackasm1(SB)
+ MOVD $963, R12
+ B callbackasm1(SB)
+ MOVD $964, R12
+ B callbackasm1(SB)
+ MOVD $965, R12
+ B callbackasm1(SB)
+ MOVD $966, R12
+ B callbackasm1(SB)
+ MOVD $967, R12
+ B callbackasm1(SB)
+ MOVD $968, R12
+ B callbackasm1(SB)
+ MOVD $969, R12
+ B callbackasm1(SB)
+ MOVD $970, R12
+ B callbackasm1(SB)
+ MOVD $971, R12
+ B callbackasm1(SB)
+ MOVD $972, R12
+ B callbackasm1(SB)
+ MOVD $973, R12
+ B callbackasm1(SB)
+ MOVD $974, R12
+ B callbackasm1(SB)
+ MOVD $975, R12
+ B callbackasm1(SB)
+ MOVD $976, R12
+ B callbackasm1(SB)
+ MOVD $977, R12
+ B callbackasm1(SB)
+ MOVD $978, R12
+ B callbackasm1(SB)
+ MOVD $979, R12
+ B callbackasm1(SB)
+ MOVD $980, R12
+ B callbackasm1(SB)
+ MOVD $981, R12
+ B callbackasm1(SB)
+ MOVD $982, R12
+ B callbackasm1(SB)
+ MOVD $983, R12
+ B callbackasm1(SB)
+ MOVD $984, R12
+ B callbackasm1(SB)
+ MOVD $985, R12
+ B callbackasm1(SB)
+ MOVD $986, R12
+ B callbackasm1(SB)
+ MOVD $987, R12
+ B callbackasm1(SB)
+ MOVD $988, R12
+ B callbackasm1(SB)
+ MOVD $989, R12
+ B callbackasm1(SB)
+ MOVD $990, R12
+ B callbackasm1(SB)
+ MOVD $991, R12
+ B callbackasm1(SB)
+ MOVD $992, R12
+ B callbackasm1(SB)
+ MOVD $993, R12
+ B callbackasm1(SB)
+ MOVD $994, R12
+ B callbackasm1(SB)
+ MOVD $995, R12
+ B callbackasm1(SB)
+ MOVD $996, R12
+ B callbackasm1(SB)
+ MOVD $997, R12
+ B callbackasm1(SB)
+ MOVD $998, R12
+ B callbackasm1(SB)
+ MOVD $999, R12
+ B callbackasm1(SB)
+ MOVD $1000, R12
+ B callbackasm1(SB)
+ MOVD $1001, R12
+ B callbackasm1(SB)
+ MOVD $1002, R12
+ B callbackasm1(SB)
+ MOVD $1003, R12
+ B callbackasm1(SB)
+ MOVD $1004, R12
+ B callbackasm1(SB)
+ MOVD $1005, R12
+ B callbackasm1(SB)
+ MOVD $1006, R12
+ B callbackasm1(SB)
+ MOVD $1007, R12
+ B callbackasm1(SB)
+ MOVD $1008, R12
+ B callbackasm1(SB)
+ MOVD $1009, R12
+ B callbackasm1(SB)
+ MOVD $1010, R12
+ B callbackasm1(SB)
+ MOVD $1011, R12
+ B callbackasm1(SB)
+ MOVD $1012, R12
+ B callbackasm1(SB)
+ MOVD $1013, R12
+ B callbackasm1(SB)
+ MOVD $1014, R12
+ B callbackasm1(SB)
+ MOVD $1015, R12
+ B callbackasm1(SB)
+ MOVD $1016, R12
+ B callbackasm1(SB)
+ MOVD $1017, R12
+ B callbackasm1(SB)
+ MOVD $1018, R12
+ B callbackasm1(SB)
+ MOVD $1019, R12
+ B callbackasm1(SB)
+ MOVD $1020, R12
+ B callbackasm1(SB)
+ MOVD $1021, R12
+ B callbackasm1(SB)
+ MOVD $1022, R12
+ B callbackasm1(SB)
+ MOVD $1023, R12
+ B callbackasm1(SB)
+ MOVD $1024, R12
+ B callbackasm1(SB)
+ MOVD $1025, R12
+ B callbackasm1(SB)
+ MOVD $1026, R12
+ B callbackasm1(SB)
+ MOVD $1027, R12
+ B callbackasm1(SB)
+ MOVD $1028, R12
+ B callbackasm1(SB)
+ MOVD $1029, R12
+ B callbackasm1(SB)
+ MOVD $1030, R12
+ B callbackasm1(SB)
+ MOVD $1031, R12
+ B callbackasm1(SB)
+ MOVD $1032, R12
+ B callbackasm1(SB)
+ MOVD $1033, R12
+ B callbackasm1(SB)
+ MOVD $1034, R12
+ B callbackasm1(SB)
+ MOVD $1035, R12
+ B callbackasm1(SB)
+ MOVD $1036, R12
+ B callbackasm1(SB)
+ MOVD $1037, R12
+ B callbackasm1(SB)
+ MOVD $1038, R12
+ B callbackasm1(SB)
+ MOVD $1039, R12
+ B callbackasm1(SB)
+ MOVD $1040, R12
+ B callbackasm1(SB)
+ MOVD $1041, R12
+ B callbackasm1(SB)
+ MOVD $1042, R12
+ B callbackasm1(SB)
+ MOVD $1043, R12
+ B callbackasm1(SB)
+ MOVD $1044, R12
+ B callbackasm1(SB)
+ MOVD $1045, R12
+ B callbackasm1(SB)
+ MOVD $1046, R12
+ B callbackasm1(SB)
+ MOVD $1047, R12
+ B callbackasm1(SB)
+ MOVD $1048, R12
+ B callbackasm1(SB)
+ MOVD $1049, R12
+ B callbackasm1(SB)
+ MOVD $1050, R12
+ B callbackasm1(SB)
+ MOVD $1051, R12
+ B callbackasm1(SB)
+ MOVD $1052, R12
+ B callbackasm1(SB)
+ MOVD $1053, R12
+ B callbackasm1(SB)
+ MOVD $1054, R12
+ B callbackasm1(SB)
+ MOVD $1055, R12
+ B callbackasm1(SB)
+ MOVD $1056, R12
+ B callbackasm1(SB)
+ MOVD $1057, R12
+ B callbackasm1(SB)
+ MOVD $1058, R12
+ B callbackasm1(SB)
+ MOVD $1059, R12
+ B callbackasm1(SB)
+ MOVD $1060, R12
+ B callbackasm1(SB)
+ MOVD $1061, R12
+ B callbackasm1(SB)
+ MOVD $1062, R12
+ B callbackasm1(SB)
+ MOVD $1063, R12
+ B callbackasm1(SB)
+ MOVD $1064, R12
+ B callbackasm1(SB)
+ MOVD $1065, R12
+ B callbackasm1(SB)
+ MOVD $1066, R12
+ B callbackasm1(SB)
+ MOVD $1067, R12
+ B callbackasm1(SB)
+ MOVD $1068, R12
+ B callbackasm1(SB)
+ MOVD $1069, R12
+ B callbackasm1(SB)
+ MOVD $1070, R12
+ B callbackasm1(SB)
+ MOVD $1071, R12
+ B callbackasm1(SB)
+ MOVD $1072, R12
+ B callbackasm1(SB)
+ MOVD $1073, R12
+ B callbackasm1(SB)
+ MOVD $1074, R12
+ B callbackasm1(SB)
+ MOVD $1075, R12
+ B callbackasm1(SB)
+ MOVD $1076, R12
+ B callbackasm1(SB)
+ MOVD $1077, R12
+ B callbackasm1(SB)
+ MOVD $1078, R12
+ B callbackasm1(SB)
+ MOVD $1079, R12
+ B callbackasm1(SB)
+ MOVD $1080, R12
+ B callbackasm1(SB)
+ MOVD $1081, R12
+ B callbackasm1(SB)
+ MOVD $1082, R12
+ B callbackasm1(SB)
+ MOVD $1083, R12
+ B callbackasm1(SB)
+ MOVD $1084, R12
+ B callbackasm1(SB)
+ MOVD $1085, R12
+ B callbackasm1(SB)
+ MOVD $1086, R12
+ B callbackasm1(SB)
+ MOVD $1087, R12
+ B callbackasm1(SB)
+ MOVD $1088, R12
+ B callbackasm1(SB)
+ MOVD $1089, R12
+ B callbackasm1(SB)
+ MOVD $1090, R12
+ B callbackasm1(SB)
+ MOVD $1091, R12
+ B callbackasm1(SB)
+ MOVD $1092, R12
+ B callbackasm1(SB)
+ MOVD $1093, R12
+ B callbackasm1(SB)
+ MOVD $1094, R12
+ B callbackasm1(SB)
+ MOVD $1095, R12
+ B callbackasm1(SB)
+ MOVD $1096, R12
+ B callbackasm1(SB)
+ MOVD $1097, R12
+ B callbackasm1(SB)
+ MOVD $1098, R12
+ B callbackasm1(SB)
+ MOVD $1099, R12
+ B callbackasm1(SB)
+ MOVD $1100, R12
+ B callbackasm1(SB)
+ MOVD $1101, R12
+ B callbackasm1(SB)
+ MOVD $1102, R12
+ B callbackasm1(SB)
+ MOVD $1103, R12
+ B callbackasm1(SB)
+ MOVD $1104, R12
+ B callbackasm1(SB)
+ MOVD $1105, R12
+ B callbackasm1(SB)
+ MOVD $1106, R12
+ B callbackasm1(SB)
+ MOVD $1107, R12
+ B callbackasm1(SB)
+ MOVD $1108, R12
+ B callbackasm1(SB)
+ MOVD $1109, R12
+ B callbackasm1(SB)
+ MOVD $1110, R12
+ B callbackasm1(SB)
+ MOVD $1111, R12
+ B callbackasm1(SB)
+ MOVD $1112, R12
+ B callbackasm1(SB)
+ MOVD $1113, R12
+ B callbackasm1(SB)
+ MOVD $1114, R12
+ B callbackasm1(SB)
+ MOVD $1115, R12
+ B callbackasm1(SB)
+ MOVD $1116, R12
+ B callbackasm1(SB)
+ MOVD $1117, R12
+ B callbackasm1(SB)
+ MOVD $1118, R12
+ B callbackasm1(SB)
+ MOVD $1119, R12
+ B callbackasm1(SB)
+ MOVD $1120, R12
+ B callbackasm1(SB)
+ MOVD $1121, R12
+ B callbackasm1(SB)
+ MOVD $1122, R12
+ B callbackasm1(SB)
+ MOVD $1123, R12
+ B callbackasm1(SB)
+ MOVD $1124, R12
+ B callbackasm1(SB)
+ MOVD $1125, R12
+ B callbackasm1(SB)
+ MOVD $1126, R12
+ B callbackasm1(SB)
+ MOVD $1127, R12
+ B callbackasm1(SB)
+ MOVD $1128, R12
+ B callbackasm1(SB)
+ MOVD $1129, R12
+ B callbackasm1(SB)
+ MOVD $1130, R12
+ B callbackasm1(SB)
+ MOVD $1131, R12
+ B callbackasm1(SB)
+ MOVD $1132, R12
+ B callbackasm1(SB)
+ MOVD $1133, R12
+ B callbackasm1(SB)
+ MOVD $1134, R12
+ B callbackasm1(SB)
+ MOVD $1135, R12
+ B callbackasm1(SB)
+ MOVD $1136, R12
+ B callbackasm1(SB)
+ MOVD $1137, R12
+ B callbackasm1(SB)
+ MOVD $1138, R12
+ B callbackasm1(SB)
+ MOVD $1139, R12
+ B callbackasm1(SB)
+ MOVD $1140, R12
+ B callbackasm1(SB)
+ MOVD $1141, R12
+ B callbackasm1(SB)
+ MOVD $1142, R12
+ B callbackasm1(SB)
+ MOVD $1143, R12
+ B callbackasm1(SB)
+ MOVD $1144, R12
+ B callbackasm1(SB)
+ MOVD $1145, R12
+ B callbackasm1(SB)
+ MOVD $1146, R12
+ B callbackasm1(SB)
+ MOVD $1147, R12
+ B callbackasm1(SB)
+ MOVD $1148, R12
+ B callbackasm1(SB)
+ MOVD $1149, R12
+ B callbackasm1(SB)
+ MOVD $1150, R12
+ B callbackasm1(SB)
+ MOVD $1151, R12
+ B callbackasm1(SB)
+ MOVD $1152, R12
+ B callbackasm1(SB)
+ MOVD $1153, R12
+ B callbackasm1(SB)
+ MOVD $1154, R12
+ B callbackasm1(SB)
+ MOVD $1155, R12
+ B callbackasm1(SB)
+ MOVD $1156, R12
+ B callbackasm1(SB)
+ MOVD $1157, R12
+ B callbackasm1(SB)
+ MOVD $1158, R12
+ B callbackasm1(SB)
+ MOVD $1159, R12
+ B callbackasm1(SB)
+ MOVD $1160, R12
+ B callbackasm1(SB)
+ MOVD $1161, R12
+ B callbackasm1(SB)
+ MOVD $1162, R12
+ B callbackasm1(SB)
+ MOVD $1163, R12
+ B callbackasm1(SB)
+ MOVD $1164, R12
+ B callbackasm1(SB)
+ MOVD $1165, R12
+ B callbackasm1(SB)
+ MOVD $1166, R12
+ B callbackasm1(SB)
+ MOVD $1167, R12
+ B callbackasm1(SB)
+ MOVD $1168, R12
+ B callbackasm1(SB)
+ MOVD $1169, R12
+ B callbackasm1(SB)
+ MOVD $1170, R12
+ B callbackasm1(SB)
+ MOVD $1171, R12
+ B callbackasm1(SB)
+ MOVD $1172, R12
+ B callbackasm1(SB)
+ MOVD $1173, R12
+ B callbackasm1(SB)
+ MOVD $1174, R12
+ B callbackasm1(SB)
+ MOVD $1175, R12
+ B callbackasm1(SB)
+ MOVD $1176, R12
+ B callbackasm1(SB)
+ MOVD $1177, R12
+ B callbackasm1(SB)
+ MOVD $1178, R12
+ B callbackasm1(SB)
+ MOVD $1179, R12
+ B callbackasm1(SB)
+ MOVD $1180, R12
+ B callbackasm1(SB)
+ MOVD $1181, R12
+ B callbackasm1(SB)
+ MOVD $1182, R12
+ B callbackasm1(SB)
+ MOVD $1183, R12
+ B callbackasm1(SB)
+ MOVD $1184, R12
+ B callbackasm1(SB)
+ MOVD $1185, R12
+ B callbackasm1(SB)
+ MOVD $1186, R12
+ B callbackasm1(SB)
+ MOVD $1187, R12
+ B callbackasm1(SB)
+ MOVD $1188, R12
+ B callbackasm1(SB)
+ MOVD $1189, R12
+ B callbackasm1(SB)
+ MOVD $1190, R12
+ B callbackasm1(SB)
+ MOVD $1191, R12
+ B callbackasm1(SB)
+ MOVD $1192, R12
+ B callbackasm1(SB)
+ MOVD $1193, R12
+ B callbackasm1(SB)
+ MOVD $1194, R12
+ B callbackasm1(SB)
+ MOVD $1195, R12
+ B callbackasm1(SB)
+ MOVD $1196, R12
+ B callbackasm1(SB)
+ MOVD $1197, R12
+ B callbackasm1(SB)
+ MOVD $1198, R12
+ B callbackasm1(SB)
+ MOVD $1199, R12
+ B callbackasm1(SB)
+ MOVD $1200, R12
+ B callbackasm1(SB)
+ MOVD $1201, R12
+ B callbackasm1(SB)
+ MOVD $1202, R12
+ B callbackasm1(SB)
+ MOVD $1203, R12
+ B callbackasm1(SB)
+ MOVD $1204, R12
+ B callbackasm1(SB)
+ MOVD $1205, R12
+ B callbackasm1(SB)
+ MOVD $1206, R12
+ B callbackasm1(SB)
+ MOVD $1207, R12
+ B callbackasm1(SB)
+ MOVD $1208, R12
+ B callbackasm1(SB)
+ MOVD $1209, R12
+ B callbackasm1(SB)
+ MOVD $1210, R12
+ B callbackasm1(SB)
+ MOVD $1211, R12
+ B callbackasm1(SB)
+ MOVD $1212, R12
+ B callbackasm1(SB)
+ MOVD $1213, R12
+ B callbackasm1(SB)
+ MOVD $1214, R12
+ B callbackasm1(SB)
+ MOVD $1215, R12
+ B callbackasm1(SB)
+ MOVD $1216, R12
+ B callbackasm1(SB)
+ MOVD $1217, R12
+ B callbackasm1(SB)
+ MOVD $1218, R12
+ B callbackasm1(SB)
+ MOVD $1219, R12
+ B callbackasm1(SB)
+ MOVD $1220, R12
+ B callbackasm1(SB)
+ MOVD $1221, R12
+ B callbackasm1(SB)
+ MOVD $1222, R12
+ B callbackasm1(SB)
+ MOVD $1223, R12
+ B callbackasm1(SB)
+ MOVD $1224, R12
+ B callbackasm1(SB)
+ MOVD $1225, R12
+ B callbackasm1(SB)
+ MOVD $1226, R12
+ B callbackasm1(SB)
+ MOVD $1227, R12
+ B callbackasm1(SB)
+ MOVD $1228, R12
+ B callbackasm1(SB)
+ MOVD $1229, R12
+ B callbackasm1(SB)
+ MOVD $1230, R12
+ B callbackasm1(SB)
+ MOVD $1231, R12
+ B callbackasm1(SB)
+ MOVD $1232, R12
+ B callbackasm1(SB)
+ MOVD $1233, R12
+ B callbackasm1(SB)
+ MOVD $1234, R12
+ B callbackasm1(SB)
+ MOVD $1235, R12
+ B callbackasm1(SB)
+ MOVD $1236, R12
+ B callbackasm1(SB)
+ MOVD $1237, R12
+ B callbackasm1(SB)
+ MOVD $1238, R12
+ B callbackasm1(SB)
+ MOVD $1239, R12
+ B callbackasm1(SB)
+ MOVD $1240, R12
+ B callbackasm1(SB)
+ MOVD $1241, R12
+ B callbackasm1(SB)
+ MOVD $1242, R12
+ B callbackasm1(SB)
+ MOVD $1243, R12
+ B callbackasm1(SB)
+ MOVD $1244, R12
+ B callbackasm1(SB)
+ MOVD $1245, R12
+ B callbackasm1(SB)
+ MOVD $1246, R12
+ B callbackasm1(SB)
+ MOVD $1247, R12
+ B callbackasm1(SB)
+ MOVD $1248, R12
+ B callbackasm1(SB)
+ MOVD $1249, R12
+ B callbackasm1(SB)
+ MOVD $1250, R12
+ B callbackasm1(SB)
+ MOVD $1251, R12
+ B callbackasm1(SB)
+ MOVD $1252, R12
+ B callbackasm1(SB)
+ MOVD $1253, R12
+ B callbackasm1(SB)
+ MOVD $1254, R12
+ B callbackasm1(SB)
+ MOVD $1255, R12
+ B callbackasm1(SB)
+ MOVD $1256, R12
+ B callbackasm1(SB)
+ MOVD $1257, R12
+ B callbackasm1(SB)
+ MOVD $1258, R12
+ B callbackasm1(SB)
+ MOVD $1259, R12
+ B callbackasm1(SB)
+ MOVD $1260, R12
+ B callbackasm1(SB)
+ MOVD $1261, R12
+ B callbackasm1(SB)
+ MOVD $1262, R12
+ B callbackasm1(SB)
+ MOVD $1263, R12
+ B callbackasm1(SB)
+ MOVD $1264, R12
+ B callbackasm1(SB)
+ MOVD $1265, R12
+ B callbackasm1(SB)
+ MOVD $1266, R12
+ B callbackasm1(SB)
+ MOVD $1267, R12
+ B callbackasm1(SB)
+ MOVD $1268, R12
+ B callbackasm1(SB)
+ MOVD $1269, R12
+ B callbackasm1(SB)
+ MOVD $1270, R12
+ B callbackasm1(SB)
+ MOVD $1271, R12
+ B callbackasm1(SB)
+ MOVD $1272, R12
+ B callbackasm1(SB)
+ MOVD $1273, R12
+ B callbackasm1(SB)
+ MOVD $1274, R12
+ B callbackasm1(SB)
+ MOVD $1275, R12
+ B callbackasm1(SB)
+ MOVD $1276, R12
+ B callbackasm1(SB)
+ MOVD $1277, R12
+ B callbackasm1(SB)
+ MOVD $1278, R12
+ B callbackasm1(SB)
+ MOVD $1279, R12
+ B callbackasm1(SB)
+ MOVD $1280, R12
+ B callbackasm1(SB)
+ MOVD $1281, R12
+ B callbackasm1(SB)
+ MOVD $1282, R12
+ B callbackasm1(SB)
+ MOVD $1283, R12
+ B callbackasm1(SB)
+ MOVD $1284, R12
+ B callbackasm1(SB)
+ MOVD $1285, R12
+ B callbackasm1(SB)
+ MOVD $1286, R12
+ B callbackasm1(SB)
+ MOVD $1287, R12
+ B callbackasm1(SB)
+ MOVD $1288, R12
+ B callbackasm1(SB)
+ MOVD $1289, R12
+ B callbackasm1(SB)
+ MOVD $1290, R12
+ B callbackasm1(SB)
+ MOVD $1291, R12
+ B callbackasm1(SB)
+ MOVD $1292, R12
+ B callbackasm1(SB)
+ MOVD $1293, R12
+ B callbackasm1(SB)
+ MOVD $1294, R12
+ B callbackasm1(SB)
+ MOVD $1295, R12
+ B callbackasm1(SB)
+ MOVD $1296, R12
+ B callbackasm1(SB)
+ MOVD $1297, R12
+ B callbackasm1(SB)
+ MOVD $1298, R12
+ B callbackasm1(SB)
+ MOVD $1299, R12
+ B callbackasm1(SB)
+ MOVD $1300, R12
+ B callbackasm1(SB)
+ MOVD $1301, R12
+ B callbackasm1(SB)
+ MOVD $1302, R12
+ B callbackasm1(SB)
+ MOVD $1303, R12
+ B callbackasm1(SB)
+ MOVD $1304, R12
+ B callbackasm1(SB)
+ MOVD $1305, R12
+ B callbackasm1(SB)
+ MOVD $1306, R12
+ B callbackasm1(SB)
+ MOVD $1307, R12
+ B callbackasm1(SB)
+ MOVD $1308, R12
+ B callbackasm1(SB)
+ MOVD $1309, R12
+ B callbackasm1(SB)
+ MOVD $1310, R12
+ B callbackasm1(SB)
+ MOVD $1311, R12
+ B callbackasm1(SB)
+ MOVD $1312, R12
+ B callbackasm1(SB)
+ MOVD $1313, R12
+ B callbackasm1(SB)
+ MOVD $1314, R12
+ B callbackasm1(SB)
+ MOVD $1315, R12
+ B callbackasm1(SB)
+ MOVD $1316, R12
+ B callbackasm1(SB)
+ MOVD $1317, R12
+ B callbackasm1(SB)
+ MOVD $1318, R12
+ B callbackasm1(SB)
+ MOVD $1319, R12
+ B callbackasm1(SB)
+ MOVD $1320, R12
+ B callbackasm1(SB)
+ MOVD $1321, R12
+ B callbackasm1(SB)
+ MOVD $1322, R12
+ B callbackasm1(SB)
+ MOVD $1323, R12
+ B callbackasm1(SB)
+ MOVD $1324, R12
+ B callbackasm1(SB)
+ MOVD $1325, R12
+ B callbackasm1(SB)
+ MOVD $1326, R12
+ B callbackasm1(SB)
+ MOVD $1327, R12
+ B callbackasm1(SB)
+ MOVD $1328, R12
+ B callbackasm1(SB)
+ MOVD $1329, R12
+ B callbackasm1(SB)
+ MOVD $1330, R12
+ B callbackasm1(SB)
+ MOVD $1331, R12
+ B callbackasm1(SB)
+ MOVD $1332, R12
+ B callbackasm1(SB)
+ MOVD $1333, R12
+ B callbackasm1(SB)
+ MOVD $1334, R12
+ B callbackasm1(SB)
+ MOVD $1335, R12
+ B callbackasm1(SB)
+ MOVD $1336, R12
+ B callbackasm1(SB)
+ MOVD $1337, R12
+ B callbackasm1(SB)
+ MOVD $1338, R12
+ B callbackasm1(SB)
+ MOVD $1339, R12
+ B callbackasm1(SB)
+ MOVD $1340, R12
+ B callbackasm1(SB)
+ MOVD $1341, R12
+ B callbackasm1(SB)
+ MOVD $1342, R12
+ B callbackasm1(SB)
+ MOVD $1343, R12
+ B callbackasm1(SB)
+ MOVD $1344, R12
+ B callbackasm1(SB)
+ MOVD $1345, R12
+ B callbackasm1(SB)
+ MOVD $1346, R12
+ B callbackasm1(SB)
+ MOVD $1347, R12
+ B callbackasm1(SB)
+ MOVD $1348, R12
+ B callbackasm1(SB)
+ MOVD $1349, R12
+ B callbackasm1(SB)
+ MOVD $1350, R12
+ B callbackasm1(SB)
+ MOVD $1351, R12
+ B callbackasm1(SB)
+ MOVD $1352, R12
+ B callbackasm1(SB)
+ MOVD $1353, R12
+ B callbackasm1(SB)
+ MOVD $1354, R12
+ B callbackasm1(SB)
+ MOVD $1355, R12
+ B callbackasm1(SB)
+ MOVD $1356, R12
+ B callbackasm1(SB)
+ MOVD $1357, R12
+ B callbackasm1(SB)
+ MOVD $1358, R12
+ B callbackasm1(SB)
+ MOVD $1359, R12
+ B callbackasm1(SB)
+ MOVD $1360, R12
+ B callbackasm1(SB)
+ MOVD $1361, R12
+ B callbackasm1(SB)
+ MOVD $1362, R12
+ B callbackasm1(SB)
+ MOVD $1363, R12
+ B callbackasm1(SB)
+ MOVD $1364, R12
+ B callbackasm1(SB)
+ MOVD $1365, R12
+ B callbackasm1(SB)
+ MOVD $1366, R12
+ B callbackasm1(SB)
+ MOVD $1367, R12
+ B callbackasm1(SB)
+ MOVD $1368, R12
+ B callbackasm1(SB)
+ MOVD $1369, R12
+ B callbackasm1(SB)
+ MOVD $1370, R12
+ B callbackasm1(SB)
+ MOVD $1371, R12
+ B callbackasm1(SB)
+ MOVD $1372, R12
+ B callbackasm1(SB)
+ MOVD $1373, R12
+ B callbackasm1(SB)
+ MOVD $1374, R12
+ B callbackasm1(SB)
+ MOVD $1375, R12
+ B callbackasm1(SB)
+ MOVD $1376, R12
+ B callbackasm1(SB)
+ MOVD $1377, R12
+ B callbackasm1(SB)
+ MOVD $1378, R12
+ B callbackasm1(SB)
+ MOVD $1379, R12
+ B callbackasm1(SB)
+ MOVD $1380, R12
+ B callbackasm1(SB)
+ MOVD $1381, R12
+ B callbackasm1(SB)
+ MOVD $1382, R12
+ B callbackasm1(SB)
+ MOVD $1383, R12
+ B callbackasm1(SB)
+ MOVD $1384, R12
+ B callbackasm1(SB)
+ MOVD $1385, R12
+ B callbackasm1(SB)
+ MOVD $1386, R12
+ B callbackasm1(SB)
+ MOVD $1387, R12
+ B callbackasm1(SB)
+ MOVD $1388, R12
+ B callbackasm1(SB)
+ MOVD $1389, R12
+ B callbackasm1(SB)
+ MOVD $1390, R12
+ B callbackasm1(SB)
+ MOVD $1391, R12
+ B callbackasm1(SB)
+ MOVD $1392, R12
+ B callbackasm1(SB)
+ MOVD $1393, R12
+ B callbackasm1(SB)
+ MOVD $1394, R12
+ B callbackasm1(SB)
+ MOVD $1395, R12
+ B callbackasm1(SB)
+ MOVD $1396, R12
+ B callbackasm1(SB)
+ MOVD $1397, R12
+ B callbackasm1(SB)
+ MOVD $1398, R12
+ B callbackasm1(SB)
+ MOVD $1399, R12
+ B callbackasm1(SB)
+ MOVD $1400, R12
+ B callbackasm1(SB)
+ MOVD $1401, R12
+ B callbackasm1(SB)
+ MOVD $1402, R12
+ B callbackasm1(SB)
+ MOVD $1403, R12
+ B callbackasm1(SB)
+ MOVD $1404, R12
+ B callbackasm1(SB)
+ MOVD $1405, R12
+ B callbackasm1(SB)
+ MOVD $1406, R12
+ B callbackasm1(SB)
+ MOVD $1407, R12
+ B callbackasm1(SB)
+ MOVD $1408, R12
+ B callbackasm1(SB)
+ MOVD $1409, R12
+ B callbackasm1(SB)
+ MOVD $1410, R12
+ B callbackasm1(SB)
+ MOVD $1411, R12
+ B callbackasm1(SB)
+ MOVD $1412, R12
+ B callbackasm1(SB)
+ MOVD $1413, R12
+ B callbackasm1(SB)
+ MOVD $1414, R12
+ B callbackasm1(SB)
+ MOVD $1415, R12
+ B callbackasm1(SB)
+ MOVD $1416, R12
+ B callbackasm1(SB)
+ MOVD $1417, R12
+ B callbackasm1(SB)
+ MOVD $1418, R12
+ B callbackasm1(SB)
+ MOVD $1419, R12
+ B callbackasm1(SB)
+ MOVD $1420, R12
+ B callbackasm1(SB)
+ MOVD $1421, R12
+ B callbackasm1(SB)
+ MOVD $1422, R12
+ B callbackasm1(SB)
+ MOVD $1423, R12
+ B callbackasm1(SB)
+ MOVD $1424, R12
+ B callbackasm1(SB)
+ MOVD $1425, R12
+ B callbackasm1(SB)
+ MOVD $1426, R12
+ B callbackasm1(SB)
+ MOVD $1427, R12
+ B callbackasm1(SB)
+ MOVD $1428, R12
+ B callbackasm1(SB)
+ MOVD $1429, R12
+ B callbackasm1(SB)
+ MOVD $1430, R12
+ B callbackasm1(SB)
+ MOVD $1431, R12
+ B callbackasm1(SB)
+ MOVD $1432, R12
+ B callbackasm1(SB)
+ MOVD $1433, R12
+ B callbackasm1(SB)
+ MOVD $1434, R12
+ B callbackasm1(SB)
+ MOVD $1435, R12
+ B callbackasm1(SB)
+ MOVD $1436, R12
+ B callbackasm1(SB)
+ MOVD $1437, R12
+ B callbackasm1(SB)
+ MOVD $1438, R12
+ B callbackasm1(SB)
+ MOVD $1439, R12
+ B callbackasm1(SB)
+ MOVD $1440, R12
+ B callbackasm1(SB)
+ MOVD $1441, R12
+ B callbackasm1(SB)
+ MOVD $1442, R12
+ B callbackasm1(SB)
+ MOVD $1443, R12
+ B callbackasm1(SB)
+ MOVD $1444, R12
+ B callbackasm1(SB)
+ MOVD $1445, R12
+ B callbackasm1(SB)
+ MOVD $1446, R12
+ B callbackasm1(SB)
+ MOVD $1447, R12
+ B callbackasm1(SB)
+ MOVD $1448, R12
+ B callbackasm1(SB)
+ MOVD $1449, R12
+ B callbackasm1(SB)
+ MOVD $1450, R12
+ B callbackasm1(SB)
+ MOVD $1451, R12
+ B callbackasm1(SB)
+ MOVD $1452, R12
+ B callbackasm1(SB)
+ MOVD $1453, R12
+ B callbackasm1(SB)
+ MOVD $1454, R12
+ B callbackasm1(SB)
+ MOVD $1455, R12
+ B callbackasm1(SB)
+ MOVD $1456, R12
+ B callbackasm1(SB)
+ MOVD $1457, R12
+ B callbackasm1(SB)
+ MOVD $1458, R12
+ B callbackasm1(SB)
+ MOVD $1459, R12
+ B callbackasm1(SB)
+ MOVD $1460, R12
+ B callbackasm1(SB)
+ MOVD $1461, R12
+ B callbackasm1(SB)
+ MOVD $1462, R12
+ B callbackasm1(SB)
+ MOVD $1463, R12
+ B callbackasm1(SB)
+ MOVD $1464, R12
+ B callbackasm1(SB)
+ MOVD $1465, R12
+ B callbackasm1(SB)
+ MOVD $1466, R12
+ B callbackasm1(SB)
+ MOVD $1467, R12
+ B callbackasm1(SB)
+ MOVD $1468, R12
+ B callbackasm1(SB)
+ MOVD $1469, R12
+ B callbackasm1(SB)
+ MOVD $1470, R12
+ B callbackasm1(SB)
+ MOVD $1471, R12
+ B callbackasm1(SB)
+ MOVD $1472, R12
+ B callbackasm1(SB)
+ MOVD $1473, R12
+ B callbackasm1(SB)
+ MOVD $1474, R12
+ B callbackasm1(SB)
+ MOVD $1475, R12
+ B callbackasm1(SB)
+ MOVD $1476, R12
+ B callbackasm1(SB)
+ MOVD $1477, R12
+ B callbackasm1(SB)
+ MOVD $1478, R12
+ B callbackasm1(SB)
+ MOVD $1479, R12
+ B callbackasm1(SB)
+ MOVD $1480, R12
+ B callbackasm1(SB)
+ MOVD $1481, R12
+ B callbackasm1(SB)
+ MOVD $1482, R12
+ B callbackasm1(SB)
+ MOVD $1483, R12
+ B callbackasm1(SB)
+ MOVD $1484, R12
+ B callbackasm1(SB)
+ MOVD $1485, R12
+ B callbackasm1(SB)
+ MOVD $1486, R12
+ B callbackasm1(SB)
+ MOVD $1487, R12
+ B callbackasm1(SB)
+ MOVD $1488, R12
+ B callbackasm1(SB)
+ MOVD $1489, R12
+ B callbackasm1(SB)
+ MOVD $1490, R12
+ B callbackasm1(SB)
+ MOVD $1491, R12
+ B callbackasm1(SB)
+ MOVD $1492, R12
+ B callbackasm1(SB)
+ MOVD $1493, R12
+ B callbackasm1(SB)
+ MOVD $1494, R12
+ B callbackasm1(SB)
+ MOVD $1495, R12
+ B callbackasm1(SB)
+ MOVD $1496, R12
+ B callbackasm1(SB)
+ MOVD $1497, R12
+ B callbackasm1(SB)
+ MOVD $1498, R12
+ B callbackasm1(SB)
+ MOVD $1499, R12
+ B callbackasm1(SB)
+ MOVD $1500, R12
+ B callbackasm1(SB)
+ MOVD $1501, R12
+ B callbackasm1(SB)
+ MOVD $1502, R12
+ B callbackasm1(SB)
+ MOVD $1503, R12
+ B callbackasm1(SB)
+ MOVD $1504, R12
+ B callbackasm1(SB)
+ MOVD $1505, R12
+ B callbackasm1(SB)
+ MOVD $1506, R12
+ B callbackasm1(SB)
+ MOVD $1507, R12
+ B callbackasm1(SB)
+ MOVD $1508, R12
+ B callbackasm1(SB)
+ MOVD $1509, R12
+ B callbackasm1(SB)
+ MOVD $1510, R12
+ B callbackasm1(SB)
+ MOVD $1511, R12
+ B callbackasm1(SB)
+ MOVD $1512, R12
+ B callbackasm1(SB)
+ MOVD $1513, R12
+ B callbackasm1(SB)
+ MOVD $1514, R12
+ B callbackasm1(SB)
+ MOVD $1515, R12
+ B callbackasm1(SB)
+ MOVD $1516, R12
+ B callbackasm1(SB)
+ MOVD $1517, R12
+ B callbackasm1(SB)
+ MOVD $1518, R12
+ B callbackasm1(SB)
+ MOVD $1519, R12
+ B callbackasm1(SB)
+ MOVD $1520, R12
+ B callbackasm1(SB)
+ MOVD $1521, R12
+ B callbackasm1(SB)
+ MOVD $1522, R12
+ B callbackasm1(SB)
+ MOVD $1523, R12
+ B callbackasm1(SB)
+ MOVD $1524, R12
+ B callbackasm1(SB)
+ MOVD $1525, R12
+ B callbackasm1(SB)
+ MOVD $1526, R12
+ B callbackasm1(SB)
+ MOVD $1527, R12
+ B callbackasm1(SB)
+ MOVD $1528, R12
+ B callbackasm1(SB)
+ MOVD $1529, R12
+ B callbackasm1(SB)
+ MOVD $1530, R12
+ B callbackasm1(SB)
+ MOVD $1531, R12
+ B callbackasm1(SB)
+ MOVD $1532, R12
+ B callbackasm1(SB)
+ MOVD $1533, R12
+ B callbackasm1(SB)
+ MOVD $1534, R12
+ B callbackasm1(SB)
+ MOVD $1535, R12
+ B callbackasm1(SB)
+ MOVD $1536, R12
+ B callbackasm1(SB)
+ MOVD $1537, R12
+ B callbackasm1(SB)
+ MOVD $1538, R12
+ B callbackasm1(SB)
+ MOVD $1539, R12
+ B callbackasm1(SB)
+ MOVD $1540, R12
+ B callbackasm1(SB)
+ MOVD $1541, R12
+ B callbackasm1(SB)
+ MOVD $1542, R12
+ B callbackasm1(SB)
+ MOVD $1543, R12
+ B callbackasm1(SB)
+ MOVD $1544, R12
+ B callbackasm1(SB)
+ MOVD $1545, R12
+ B callbackasm1(SB)
+ MOVD $1546, R12
+ B callbackasm1(SB)
+ MOVD $1547, R12
+ B callbackasm1(SB)
+ MOVD $1548, R12
+ B callbackasm1(SB)
+ MOVD $1549, R12
+ B callbackasm1(SB)
+ MOVD $1550, R12
+ B callbackasm1(SB)
+ MOVD $1551, R12
+ B callbackasm1(SB)
+ MOVD $1552, R12
+ B callbackasm1(SB)
+ MOVD $1553, R12
+ B callbackasm1(SB)
+ MOVD $1554, R12
+ B callbackasm1(SB)
+ MOVD $1555, R12
+ B callbackasm1(SB)
+ MOVD $1556, R12
+ B callbackasm1(SB)
+ MOVD $1557, R12
+ B callbackasm1(SB)
+ MOVD $1558, R12
+ B callbackasm1(SB)
+ MOVD $1559, R12
+ B callbackasm1(SB)
+ MOVD $1560, R12
+ B callbackasm1(SB)
+ MOVD $1561, R12
+ B callbackasm1(SB)
+ MOVD $1562, R12
+ B callbackasm1(SB)
+ MOVD $1563, R12
+ B callbackasm1(SB)
+ MOVD $1564, R12
+ B callbackasm1(SB)
+ MOVD $1565, R12
+ B callbackasm1(SB)
+ MOVD $1566, R12
+ B callbackasm1(SB)
+ MOVD $1567, R12
+ B callbackasm1(SB)
+ MOVD $1568, R12
+ B callbackasm1(SB)
+ MOVD $1569, R12
+ B callbackasm1(SB)
+ MOVD $1570, R12
+ B callbackasm1(SB)
+ MOVD $1571, R12
+ B callbackasm1(SB)
+ MOVD $1572, R12
+ B callbackasm1(SB)
+ MOVD $1573, R12
+ B callbackasm1(SB)
+ MOVD $1574, R12
+ B callbackasm1(SB)
+ MOVD $1575, R12
+ B callbackasm1(SB)
+ MOVD $1576, R12
+ B callbackasm1(SB)
+ MOVD $1577, R12
+ B callbackasm1(SB)
+ MOVD $1578, R12
+ B callbackasm1(SB)
+ MOVD $1579, R12
+ B callbackasm1(SB)
+ MOVD $1580, R12
+ B callbackasm1(SB)
+ MOVD $1581, R12
+ B callbackasm1(SB)
+ MOVD $1582, R12
+ B callbackasm1(SB)
+ MOVD $1583, R12
+ B callbackasm1(SB)
+ MOVD $1584, R12
+ B callbackasm1(SB)
+ MOVD $1585, R12
+ B callbackasm1(SB)
+ MOVD $1586, R12
+ B callbackasm1(SB)
+ MOVD $1587, R12
+ B callbackasm1(SB)
+ MOVD $1588, R12
+ B callbackasm1(SB)
+ MOVD $1589, R12
+ B callbackasm1(SB)
+ MOVD $1590, R12
+ B callbackasm1(SB)
+ MOVD $1591, R12
+ B callbackasm1(SB)
+ MOVD $1592, R12
+ B callbackasm1(SB)
+ MOVD $1593, R12
+ B callbackasm1(SB)
+ MOVD $1594, R12
+ B callbackasm1(SB)
+ MOVD $1595, R12
+ B callbackasm1(SB)
+ MOVD $1596, R12
+ B callbackasm1(SB)
+ MOVD $1597, R12
+ B callbackasm1(SB)
+ MOVD $1598, R12
+ B callbackasm1(SB)
+ MOVD $1599, R12
+ B callbackasm1(SB)
+ MOVD $1600, R12
+ B callbackasm1(SB)
+ MOVD $1601, R12
+ B callbackasm1(SB)
+ MOVD $1602, R12
+ B callbackasm1(SB)
+ MOVD $1603, R12
+ B callbackasm1(SB)
+ MOVD $1604, R12
+ B callbackasm1(SB)
+ MOVD $1605, R12
+ B callbackasm1(SB)
+ MOVD $1606, R12
+ B callbackasm1(SB)
+ MOVD $1607, R12
+ B callbackasm1(SB)
+ MOVD $1608, R12
+ B callbackasm1(SB)
+ MOVD $1609, R12
+ B callbackasm1(SB)
+ MOVD $1610, R12
+ B callbackasm1(SB)
+ MOVD $1611, R12
+ B callbackasm1(SB)
+ MOVD $1612, R12
+ B callbackasm1(SB)
+ MOVD $1613, R12
+ B callbackasm1(SB)
+ MOVD $1614, R12
+ B callbackasm1(SB)
+ MOVD $1615, R12
+ B callbackasm1(SB)
+ MOVD $1616, R12
+ B callbackasm1(SB)
+ MOVD $1617, R12
+ B callbackasm1(SB)
+ MOVD $1618, R12
+ B callbackasm1(SB)
+ MOVD $1619, R12
+ B callbackasm1(SB)
+ MOVD $1620, R12
+ B callbackasm1(SB)
+ MOVD $1621, R12
+ B callbackasm1(SB)
+ MOVD $1622, R12
+ B callbackasm1(SB)
+ MOVD $1623, R12
+ B callbackasm1(SB)
+ MOVD $1624, R12
+ B callbackasm1(SB)
+ MOVD $1625, R12
+ B callbackasm1(SB)
+ MOVD $1626, R12
+ B callbackasm1(SB)
+ MOVD $1627, R12
+ B callbackasm1(SB)
+ MOVD $1628, R12
+ B callbackasm1(SB)
+ MOVD $1629, R12
+ B callbackasm1(SB)
+ MOVD $1630, R12
+ B callbackasm1(SB)
+ MOVD $1631, R12
+ B callbackasm1(SB)
+ MOVD $1632, R12
+ B callbackasm1(SB)
+ MOVD $1633, R12
+ B callbackasm1(SB)
+ MOVD $1634, R12
+ B callbackasm1(SB)
+ MOVD $1635, R12
+ B callbackasm1(SB)
+ MOVD $1636, R12
+ B callbackasm1(SB)
+ MOVD $1637, R12
+ B callbackasm1(SB)
+ MOVD $1638, R12
+ B callbackasm1(SB)
+ MOVD $1639, R12
+ B callbackasm1(SB)
+ MOVD $1640, R12
+ B callbackasm1(SB)
+ MOVD $1641, R12
+ B callbackasm1(SB)
+ MOVD $1642, R12
+ B callbackasm1(SB)
+ MOVD $1643, R12
+ B callbackasm1(SB)
+ MOVD $1644, R12
+ B callbackasm1(SB)
+ MOVD $1645, R12
+ B callbackasm1(SB)
+ MOVD $1646, R12
+ B callbackasm1(SB)
+ MOVD $1647, R12
+ B callbackasm1(SB)
+ MOVD $1648, R12
+ B callbackasm1(SB)
+ MOVD $1649, R12
+ B callbackasm1(SB)
+ MOVD $1650, R12
+ B callbackasm1(SB)
+ MOVD $1651, R12
+ B callbackasm1(SB)
+ MOVD $1652, R12
+ B callbackasm1(SB)
+ MOVD $1653, R12
+ B callbackasm1(SB)
+ MOVD $1654, R12
+ B callbackasm1(SB)
+ MOVD $1655, R12
+ B callbackasm1(SB)
+ MOVD $1656, R12
+ B callbackasm1(SB)
+ MOVD $1657, R12
+ B callbackasm1(SB)
+ MOVD $1658, R12
+ B callbackasm1(SB)
+ MOVD $1659, R12
+ B callbackasm1(SB)
+ MOVD $1660, R12
+ B callbackasm1(SB)
+ MOVD $1661, R12
+ B callbackasm1(SB)
+ MOVD $1662, R12
+ B callbackasm1(SB)
+ MOVD $1663, R12
+ B callbackasm1(SB)
+ MOVD $1664, R12
+ B callbackasm1(SB)
+ MOVD $1665, R12
+ B callbackasm1(SB)
+ MOVD $1666, R12
+ B callbackasm1(SB)
+ MOVD $1667, R12
+ B callbackasm1(SB)
+ MOVD $1668, R12
+ B callbackasm1(SB)
+ MOVD $1669, R12
+ B callbackasm1(SB)
+ MOVD $1670, R12
+ B callbackasm1(SB)
+ MOVD $1671, R12
+ B callbackasm1(SB)
+ MOVD $1672, R12
+ B callbackasm1(SB)
+ MOVD $1673, R12
+ B callbackasm1(SB)
+ MOVD $1674, R12
+ B callbackasm1(SB)
+ MOVD $1675, R12
+ B callbackasm1(SB)
+ MOVD $1676, R12
+ B callbackasm1(SB)
+ MOVD $1677, R12
+ B callbackasm1(SB)
+ MOVD $1678, R12
+ B callbackasm1(SB)
+ MOVD $1679, R12
+ B callbackasm1(SB)
+ MOVD $1680, R12
+ B callbackasm1(SB)
+ MOVD $1681, R12
+ B callbackasm1(SB)
+ MOVD $1682, R12
+ B callbackasm1(SB)
+ MOVD $1683, R12
+ B callbackasm1(SB)
+ MOVD $1684, R12
+ B callbackasm1(SB)
+ MOVD $1685, R12
+ B callbackasm1(SB)
+ MOVD $1686, R12
+ B callbackasm1(SB)
+ MOVD $1687, R12
+ B callbackasm1(SB)
+ MOVD $1688, R12
+ B callbackasm1(SB)
+ MOVD $1689, R12
+ B callbackasm1(SB)
+ MOVD $1690, R12
+ B callbackasm1(SB)
+ MOVD $1691, R12
+ B callbackasm1(SB)
+ MOVD $1692, R12
+ B callbackasm1(SB)
+ MOVD $1693, R12
+ B callbackasm1(SB)
+ MOVD $1694, R12
+ B callbackasm1(SB)
+ MOVD $1695, R12
+ B callbackasm1(SB)
+ MOVD $1696, R12
+ B callbackasm1(SB)
+ MOVD $1697, R12
+ B callbackasm1(SB)
+ MOVD $1698, R12
+ B callbackasm1(SB)
+ MOVD $1699, R12
+ B callbackasm1(SB)
+ MOVD $1700, R12
+ B callbackasm1(SB)
+ MOVD $1701, R12
+ B callbackasm1(SB)
+ MOVD $1702, R12
+ B callbackasm1(SB)
+ MOVD $1703, R12
+ B callbackasm1(SB)
+ MOVD $1704, R12
+ B callbackasm1(SB)
+ MOVD $1705, R12
+ B callbackasm1(SB)
+ MOVD $1706, R12
+ B callbackasm1(SB)
+ MOVD $1707, R12
+ B callbackasm1(SB)
+ MOVD $1708, R12
+ B callbackasm1(SB)
+ MOVD $1709, R12
+ B callbackasm1(SB)
+ MOVD $1710, R12
+ B callbackasm1(SB)
+ MOVD $1711, R12
+ B callbackasm1(SB)
+ MOVD $1712, R12
+ B callbackasm1(SB)
+ MOVD $1713, R12
+ B callbackasm1(SB)
+ MOVD $1714, R12
+ B callbackasm1(SB)
+ MOVD $1715, R12
+ B callbackasm1(SB)
+ MOVD $1716, R12
+ B callbackasm1(SB)
+ MOVD $1717, R12
+ B callbackasm1(SB)
+ MOVD $1718, R12
+ B callbackasm1(SB)
+ MOVD $1719, R12
+ B callbackasm1(SB)
+ MOVD $1720, R12
+ B callbackasm1(SB)
+ MOVD $1721, R12
+ B callbackasm1(SB)
+ MOVD $1722, R12
+ B callbackasm1(SB)
+ MOVD $1723, R12
+ B callbackasm1(SB)
+ MOVD $1724, R12
+ B callbackasm1(SB)
+ MOVD $1725, R12
+ B callbackasm1(SB)
+ MOVD $1726, R12
+ B callbackasm1(SB)
+ MOVD $1727, R12
+ B callbackasm1(SB)
+ MOVD $1728, R12
+ B callbackasm1(SB)
+ MOVD $1729, R12
+ B callbackasm1(SB)
+ MOVD $1730, R12
+ B callbackasm1(SB)
+ MOVD $1731, R12
+ B callbackasm1(SB)
+ MOVD $1732, R12
+ B callbackasm1(SB)
+ MOVD $1733, R12
+ B callbackasm1(SB)
+ MOVD $1734, R12
+ B callbackasm1(SB)
+ MOVD $1735, R12
+ B callbackasm1(SB)
+ MOVD $1736, R12
+ B callbackasm1(SB)
+ MOVD $1737, R12
+ B callbackasm1(SB)
+ MOVD $1738, R12
+ B callbackasm1(SB)
+ MOVD $1739, R12
+ B callbackasm1(SB)
+ MOVD $1740, R12
+ B callbackasm1(SB)
+ MOVD $1741, R12
+ B callbackasm1(SB)
+ MOVD $1742, R12
+ B callbackasm1(SB)
+ MOVD $1743, R12
+ B callbackasm1(SB)
+ MOVD $1744, R12
+ B callbackasm1(SB)
+ MOVD $1745, R12
+ B callbackasm1(SB)
+ MOVD $1746, R12
+ B callbackasm1(SB)
+ MOVD $1747, R12
+ B callbackasm1(SB)
+ MOVD $1748, R12
+ B callbackasm1(SB)
+ MOVD $1749, R12
+ B callbackasm1(SB)
+ MOVD $1750, R12
+ B callbackasm1(SB)
+ MOVD $1751, R12
+ B callbackasm1(SB)
+ MOVD $1752, R12
+ B callbackasm1(SB)
+ MOVD $1753, R12
+ B callbackasm1(SB)
+ MOVD $1754, R12
+ B callbackasm1(SB)
+ MOVD $1755, R12
+ B callbackasm1(SB)
+ MOVD $1756, R12
+ B callbackasm1(SB)
+ MOVD $1757, R12
+ B callbackasm1(SB)
+ MOVD $1758, R12
+ B callbackasm1(SB)
+ MOVD $1759, R12
+ B callbackasm1(SB)
+ MOVD $1760, R12
+ B callbackasm1(SB)
+ MOVD $1761, R12
+ B callbackasm1(SB)
+ MOVD $1762, R12
+ B callbackasm1(SB)
+ MOVD $1763, R12
+ B callbackasm1(SB)
+ MOVD $1764, R12
+ B callbackasm1(SB)
+ MOVD $1765, R12
+ B callbackasm1(SB)
+ MOVD $1766, R12
+ B callbackasm1(SB)
+ MOVD $1767, R12
+ B callbackasm1(SB)
+ MOVD $1768, R12
+ B callbackasm1(SB)
+ MOVD $1769, R12
+ B callbackasm1(SB)
+ MOVD $1770, R12
+ B callbackasm1(SB)
+ MOVD $1771, R12
+ B callbackasm1(SB)
+ MOVD $1772, R12
+ B callbackasm1(SB)
+ MOVD $1773, R12
+ B callbackasm1(SB)
+ MOVD $1774, R12
+ B callbackasm1(SB)
+ MOVD $1775, R12
+ B callbackasm1(SB)
+ MOVD $1776, R12
+ B callbackasm1(SB)
+ MOVD $1777, R12
+ B callbackasm1(SB)
+ MOVD $1778, R12
+ B callbackasm1(SB)
+ MOVD $1779, R12
+ B callbackasm1(SB)
+ MOVD $1780, R12
+ B callbackasm1(SB)
+ MOVD $1781, R12
+ B callbackasm1(SB)
+ MOVD $1782, R12
+ B callbackasm1(SB)
+ MOVD $1783, R12
+ B callbackasm1(SB)
+ MOVD $1784, R12
+ B callbackasm1(SB)
+ MOVD $1785, R12
+ B callbackasm1(SB)
+ MOVD $1786, R12
+ B callbackasm1(SB)
+ MOVD $1787, R12
+ B callbackasm1(SB)
+ MOVD $1788, R12
+ B callbackasm1(SB)
+ MOVD $1789, R12
+ B callbackasm1(SB)
+ MOVD $1790, R12
+ B callbackasm1(SB)
+ MOVD $1791, R12
+ B callbackasm1(SB)
+ MOVD $1792, R12
+ B callbackasm1(SB)
+ MOVD $1793, R12
+ B callbackasm1(SB)
+ MOVD $1794, R12
+ B callbackasm1(SB)
+ MOVD $1795, R12
+ B callbackasm1(SB)
+ MOVD $1796, R12
+ B callbackasm1(SB)
+ MOVD $1797, R12
+ B callbackasm1(SB)
+ MOVD $1798, R12
+ B callbackasm1(SB)
+ MOVD $1799, R12
+ B callbackasm1(SB)
+ MOVD $1800, R12
+ B callbackasm1(SB)
+ MOVD $1801, R12
+ B callbackasm1(SB)
+ MOVD $1802, R12
+ B callbackasm1(SB)
+ MOVD $1803, R12
+ B callbackasm1(SB)
+ MOVD $1804, R12
+ B callbackasm1(SB)
+ MOVD $1805, R12
+ B callbackasm1(SB)
+ MOVD $1806, R12
+ B callbackasm1(SB)
+ MOVD $1807, R12
+ B callbackasm1(SB)
+ MOVD $1808, R12
+ B callbackasm1(SB)
+ MOVD $1809, R12
+ B callbackasm1(SB)
+ MOVD $1810, R12
+ B callbackasm1(SB)
+ MOVD $1811, R12
+ B callbackasm1(SB)
+ MOVD $1812, R12
+ B callbackasm1(SB)
+ MOVD $1813, R12
+ B callbackasm1(SB)
+ MOVD $1814, R12
+ B callbackasm1(SB)
+ MOVD $1815, R12
+ B callbackasm1(SB)
+ MOVD $1816, R12
+ B callbackasm1(SB)
+ MOVD $1817, R12
+ B callbackasm1(SB)
+ MOVD $1818, R12
+ B callbackasm1(SB)
+ MOVD $1819, R12
+ B callbackasm1(SB)
+ MOVD $1820, R12
+ B callbackasm1(SB)
+ MOVD $1821, R12
+ B callbackasm1(SB)
+ MOVD $1822, R12
+ B callbackasm1(SB)
+ MOVD $1823, R12
+ B callbackasm1(SB)
+ MOVD $1824, R12
+ B callbackasm1(SB)
+ MOVD $1825, R12
+ B callbackasm1(SB)
+ MOVD $1826, R12
+ B callbackasm1(SB)
+ MOVD $1827, R12
+ B callbackasm1(SB)
+ MOVD $1828, R12
+ B callbackasm1(SB)
+ MOVD $1829, R12
+ B callbackasm1(SB)
+ MOVD $1830, R12
+ B callbackasm1(SB)
+ MOVD $1831, R12
+ B callbackasm1(SB)
+ MOVD $1832, R12
+ B callbackasm1(SB)
+ MOVD $1833, R12
+ B callbackasm1(SB)
+ MOVD $1834, R12
+ B callbackasm1(SB)
+ MOVD $1835, R12
+ B callbackasm1(SB)
+ MOVD $1836, R12
+ B callbackasm1(SB)
+ MOVD $1837, R12
+ B callbackasm1(SB)
+ MOVD $1838, R12
+ B callbackasm1(SB)
+ MOVD $1839, R12
+ B callbackasm1(SB)
+ MOVD $1840, R12
+ B callbackasm1(SB)
+ MOVD $1841, R12
+ B callbackasm1(SB)
+ MOVD $1842, R12
+ B callbackasm1(SB)
+ MOVD $1843, R12
+ B callbackasm1(SB)
+ MOVD $1844, R12
+ B callbackasm1(SB)
+ MOVD $1845, R12
+ B callbackasm1(SB)
+ MOVD $1846, R12
+ B callbackasm1(SB)
+ MOVD $1847, R12
+ B callbackasm1(SB)
+ MOVD $1848, R12
+ B callbackasm1(SB)
+ MOVD $1849, R12
+ B callbackasm1(SB)
+ MOVD $1850, R12
+ B callbackasm1(SB)
+ MOVD $1851, R12
+ B callbackasm1(SB)
+ MOVD $1852, R12
+ B callbackasm1(SB)
+ MOVD $1853, R12
+ B callbackasm1(SB)
+ MOVD $1854, R12
+ B callbackasm1(SB)
+ MOVD $1855, R12
+ B callbackasm1(SB)
+ MOVD $1856, R12
+ B callbackasm1(SB)
+ MOVD $1857, R12
+ B callbackasm1(SB)
+ MOVD $1858, R12
+ B callbackasm1(SB)
+ MOVD $1859, R12
+ B callbackasm1(SB)
+ MOVD $1860, R12
+ B callbackasm1(SB)
+ MOVD $1861, R12
+ B callbackasm1(SB)
+ MOVD $1862, R12
+ B callbackasm1(SB)
+ MOVD $1863, R12
+ B callbackasm1(SB)
+ MOVD $1864, R12
+ B callbackasm1(SB)
+ MOVD $1865, R12
+ B callbackasm1(SB)
+ MOVD $1866, R12
+ B callbackasm1(SB)
+ MOVD $1867, R12
+ B callbackasm1(SB)
+ MOVD $1868, R12
+ B callbackasm1(SB)
+ MOVD $1869, R12
+ B callbackasm1(SB)
+ MOVD $1870, R12
+ B callbackasm1(SB)
+ MOVD $1871, R12
+ B callbackasm1(SB)
+ MOVD $1872, R12
+ B callbackasm1(SB)
+ MOVD $1873, R12
+ B callbackasm1(SB)
+ MOVD $1874, R12
+ B callbackasm1(SB)
+ MOVD $1875, R12
+ B callbackasm1(SB)
+ MOVD $1876, R12
+ B callbackasm1(SB)
+ MOVD $1877, R12
+ B callbackasm1(SB)
+ MOVD $1878, R12
+ B callbackasm1(SB)
+ MOVD $1879, R12
+ B callbackasm1(SB)
+ MOVD $1880, R12
+ B callbackasm1(SB)
+ MOVD $1881, R12
+ B callbackasm1(SB)
+ MOVD $1882, R12
+ B callbackasm1(SB)
+ MOVD $1883, R12
+ B callbackasm1(SB)
+ MOVD $1884, R12
+ B callbackasm1(SB)
+ MOVD $1885, R12
+ B callbackasm1(SB)
+ MOVD $1886, R12
+ B callbackasm1(SB)
+ MOVD $1887, R12
+ B callbackasm1(SB)
+ MOVD $1888, R12
+ B callbackasm1(SB)
+ MOVD $1889, R12
+ B callbackasm1(SB)
+ MOVD $1890, R12
+ B callbackasm1(SB)
+ MOVD $1891, R12
+ B callbackasm1(SB)
+ MOVD $1892, R12
+ B callbackasm1(SB)
+ MOVD $1893, R12
+ B callbackasm1(SB)
+ MOVD $1894, R12
+ B callbackasm1(SB)
+ MOVD $1895, R12
+ B callbackasm1(SB)
+ MOVD $1896, R12
+ B callbackasm1(SB)
+ MOVD $1897, R12
+ B callbackasm1(SB)
+ MOVD $1898, R12
+ B callbackasm1(SB)
+ MOVD $1899, R12
+ B callbackasm1(SB)
+ MOVD $1900, R12
+ B callbackasm1(SB)
+ MOVD $1901, R12
+ B callbackasm1(SB)
+ MOVD $1902, R12
+ B callbackasm1(SB)
+ MOVD $1903, R12
+ B callbackasm1(SB)
+ MOVD $1904, R12
+ B callbackasm1(SB)
+ MOVD $1905, R12
+ B callbackasm1(SB)
+ MOVD $1906, R12
+ B callbackasm1(SB)
+ MOVD $1907, R12
+ B callbackasm1(SB)
+ MOVD $1908, R12
+ B callbackasm1(SB)
+ MOVD $1909, R12
+ B callbackasm1(SB)
+ MOVD $1910, R12
+ B callbackasm1(SB)
+ MOVD $1911, R12
+ B callbackasm1(SB)
+ MOVD $1912, R12
+ B callbackasm1(SB)
+ MOVD $1913, R12
+ B callbackasm1(SB)
+ MOVD $1914, R12
+ B callbackasm1(SB)
+ MOVD $1915, R12
+ B callbackasm1(SB)
+ MOVD $1916, R12
+ B callbackasm1(SB)
+ MOVD $1917, R12
+ B callbackasm1(SB)
+ MOVD $1918, R12
+ B callbackasm1(SB)
+ MOVD $1919, R12
+ B callbackasm1(SB)
+ MOVD $1920, R12
+ B callbackasm1(SB)
+ MOVD $1921, R12
+ B callbackasm1(SB)
+ MOVD $1922, R12
+ B callbackasm1(SB)
+ MOVD $1923, R12
+ B callbackasm1(SB)
+ MOVD $1924, R12
+ B callbackasm1(SB)
+ MOVD $1925, R12
+ B callbackasm1(SB)
+ MOVD $1926, R12
+ B callbackasm1(SB)
+ MOVD $1927, R12
+ B callbackasm1(SB)
+ MOVD $1928, R12
+ B callbackasm1(SB)
+ MOVD $1929, R12
+ B callbackasm1(SB)
+ MOVD $1930, R12
+ B callbackasm1(SB)
+ MOVD $1931, R12
+ B callbackasm1(SB)
+ MOVD $1932, R12
+ B callbackasm1(SB)
+ MOVD $1933, R12
+ B callbackasm1(SB)
+ MOVD $1934, R12
+ B callbackasm1(SB)
+ MOVD $1935, R12
+ B callbackasm1(SB)
+ MOVD $1936, R12
+ B callbackasm1(SB)
+ MOVD $1937, R12
+ B callbackasm1(SB)
+ MOVD $1938, R12
+ B callbackasm1(SB)
+ MOVD $1939, R12
+ B callbackasm1(SB)
+ MOVD $1940, R12
+ B callbackasm1(SB)
+ MOVD $1941, R12
+ B callbackasm1(SB)
+ MOVD $1942, R12
+ B callbackasm1(SB)
+ MOVD $1943, R12
+ B callbackasm1(SB)
+ MOVD $1944, R12
+ B callbackasm1(SB)
+ MOVD $1945, R12
+ B callbackasm1(SB)
+ MOVD $1946, R12
+ B callbackasm1(SB)
+ MOVD $1947, R12
+ B callbackasm1(SB)
+ MOVD $1948, R12
+ B callbackasm1(SB)
+ MOVD $1949, R12
+ B callbackasm1(SB)
+ MOVD $1950, R12
+ B callbackasm1(SB)
+ MOVD $1951, R12
+ B callbackasm1(SB)
+ MOVD $1952, R12
+ B callbackasm1(SB)
+ MOVD $1953, R12
+ B callbackasm1(SB)
+ MOVD $1954, R12
+ B callbackasm1(SB)
+ MOVD $1955, R12
+ B callbackasm1(SB)
+ MOVD $1956, R12
+ B callbackasm1(SB)
+ MOVD $1957, R12
+ B callbackasm1(SB)
+ MOVD $1958, R12
+ B callbackasm1(SB)
+ MOVD $1959, R12
+ B callbackasm1(SB)
+ MOVD $1960, R12
+ B callbackasm1(SB)
+ MOVD $1961, R12
+ B callbackasm1(SB)
+ MOVD $1962, R12
+ B callbackasm1(SB)
+ MOVD $1963, R12
+ B callbackasm1(SB)
+ MOVD $1964, R12
+ B callbackasm1(SB)
+ MOVD $1965, R12
+ B callbackasm1(SB)
+ MOVD $1966, R12
+ B callbackasm1(SB)
+ MOVD $1967, R12
+ B callbackasm1(SB)
+ MOVD $1968, R12
+ B callbackasm1(SB)
+ MOVD $1969, R12
+ B callbackasm1(SB)
+ MOVD $1970, R12
+ B callbackasm1(SB)
+ MOVD $1971, R12
+ B callbackasm1(SB)
+ MOVD $1972, R12
+ B callbackasm1(SB)
+ MOVD $1973, R12
+ B callbackasm1(SB)
+ MOVD $1974, R12
+ B callbackasm1(SB)
+ MOVD $1975, R12
+ B callbackasm1(SB)
+ MOVD $1976, R12
+ B callbackasm1(SB)
+ MOVD $1977, R12
+ B callbackasm1(SB)
+ MOVD $1978, R12
+ B callbackasm1(SB)
+ MOVD $1979, R12
+ B callbackasm1(SB)
+ MOVD $1980, R12
+ B callbackasm1(SB)
+ MOVD $1981, R12
+ B callbackasm1(SB)
+ MOVD $1982, R12
+ B callbackasm1(SB)
+ MOVD $1983, R12
+ B callbackasm1(SB)
+ MOVD $1984, R12
+ B callbackasm1(SB)
+ MOVD $1985, R12
+ B callbackasm1(SB)
+ MOVD $1986, R12
+ B callbackasm1(SB)
+ MOVD $1987, R12
+ B callbackasm1(SB)
+ MOVD $1988, R12
+ B callbackasm1(SB)
+ MOVD $1989, R12
+ B callbackasm1(SB)
+ MOVD $1990, R12
+ B callbackasm1(SB)
+ MOVD $1991, R12
+ B callbackasm1(SB)
+ MOVD $1992, R12
+ B callbackasm1(SB)
+ MOVD $1993, R12
+ B callbackasm1(SB)
+ MOVD $1994, R12
+ B callbackasm1(SB)
+ MOVD $1995, R12
+ B callbackasm1(SB)
+ MOVD $1996, R12
+ B callbackasm1(SB)
+ MOVD $1997, R12
+ B callbackasm1(SB)
+ MOVD $1998, R12
+ B callbackasm1(SB)
+ MOVD $1999, R12
+ B callbackasm1(SB)
diff --git a/vendor/github.com/fsouza/fake-gcs-server/LICENSE b/vendor/github.com/fsouza/fake-gcs-server/LICENSE
index 529faa468606e..a619aaecef9d1 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/LICENSE
+++ b/vendor/github.com/fsouza/fake-gcs-server/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2017-2019, Francisco Souza
+Copyright (c) Francisco Souza
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
index e2fa2ad3716ee..4026f1a4a0deb 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
@@ -6,49 +6,161 @@ package fakestorage
import (
"encoding/json"
+ "errors"
+ "fmt"
+ "io"
"net/http"
+ "regexp"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
"github.com/gorilla/mux"
)
+var bucketRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$`)
+
// CreateBucket creates a bucket inside the server, so any API calls that
// require the bucket name will recognize this bucket.
//
// If the bucket already exists, this method does nothing.
+//
+// Deprecated: use CreateBucketWithOpts.
func (s *Server) CreateBucket(name string) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- err := s.backend.CreateBucket(name)
+ err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: false, DefaultEventBasedHold: false})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *Server) updateBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ attrsToUpdate := getBucketAttrsToUpdate(r.Body)
+ err := s.backend.UpdateBucket(bucketName, attrsToUpdate)
if err != nil {
panic(err)
}
+ return jsonResponse{}
+}
+
+func getBucketAttrsToUpdate(body io.ReadCloser) backend.BucketAttrs {
+ var data struct {
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
+ Versioning bucketVersioning `json:"versioning,omitempty"`
+ }
+ err := json.NewDecoder(body).Decode(&data)
+ if err != nil {
+ panic(err)
+ }
+ attrsToUpdate := backend.BucketAttrs{
+ DefaultEventBasedHold: data.DefaultEventBasedHold,
+ VersioningEnabled: data.Versioning.Enabled,
+ }
+ return attrsToUpdate
+}
+
+// CreateBucketOpts defines the properties of a bucket you can create with
+// CreateBucketWithOpts.
+type CreateBucketOpts struct {
+ Name string
+ VersioningEnabled bool
+ DefaultEventBasedHold bool
+}
+
+// CreateBucketWithOpts creates a bucket inside the server, so any API calls that
+// require the bucket name will recognize this bucket. Use CreateBucketOpts to
+// customize the options for this bucket
+//
+// If the underlying backend returns an error, this method panics.
+func (s *Server) CreateBucketWithOpts(opts CreateBucketOpts) {
+ err := s.backend.CreateBucket(opts.Name, backend.BucketAttrs{VersioningEnabled: opts.VersioningEnabled, DefaultEventBasedHold: opts.DefaultEventBasedHold})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *Server) createBucketByPost(r *http.Request) jsonResponse {
+ // Minimal version of Bucket from google.golang.org/api/storage/v1
+
+ var data struct {
+ Name string `json:"name,omitempty"`
+ Versioning *bucketVersioning `json:"versioning,omitempty"`
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
+ }
+
+ // Read the bucket props from the request body JSON
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&data); err != nil {
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ name := data.Name
+ versioning := false
+ if data.Versioning != nil {
+ versioning = data.Versioning.Enabled
+ }
+ defaultEventBasedHold := data.DefaultEventBasedHold
+ if err := validateBucketName(name); err != nil {
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+
+ _, err := s.backend.GetBucket(name)
+ if err == nil {
+ return jsonResponse{
+ errorMessage: fmt.Sprintf(
+ "A Cloud Storage bucket named '%s' already exists. "+
+ "Try another name. Bucket names must be globally unique "+
+ "across all Google Cloud projects, including those "+
+ "outside of your organization.", name),
+ status: http.StatusConflict,
+ }
+ }
+
+ // Create the named bucket
+ if err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: versioning, DefaultEventBasedHold: defaultEventBasedHold}); err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+
+ // Return the created bucket:
+ bucket, err := s.backend.GetBucket(name)
+ if err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)}
}
-func (s *Server) listBuckets(w http.ResponseWriter, r *http.Request) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
+func (s *Server) listBuckets(r *http.Request) jsonResponse {
+ buckets, err := s.backend.ListBuckets()
+ if err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ return jsonResponse{data: newListBucketsResponse(buckets, s.options.BucketsLocation)}
+}
- bucketNames, err := s.backend.ListBuckets()
+func (s *Server) getBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ bucket, err := s.backend.GetBucket(bucketName)
+ if err != nil {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)}
+}
+
+func (s *Server) deleteBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ err := s.backend.DeleteBucket(bucketName)
+ if err == backend.BucketNotFound {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ if err == backend.BucketNotEmpty {
+ return jsonResponse{status: http.StatusPreconditionFailed, errorMessage: err.Error()}
+ }
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{status: http.StatusInternalServerError, errorMessage: err.Error()}
}
- resp := newListBucketsResponse(bucketNames)
- json.NewEncoder(w).Encode(resp)
+ return jsonResponse{}
}
-func (s *Server) getBucket(w http.ResponseWriter, r *http.Request) {
- bucketName := mux.Vars(r)["bucketName"]
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- encoder := json.NewEncoder(w)
- if err := s.backend.GetBucket(bucketName); err != nil {
- w.WriteHeader(http.StatusNotFound)
- err := newErrorResponse(http.StatusNotFound, "Not found", nil)
- encoder.Encode(err)
- return
+func validateBucketName(bucketName string) error {
+ if !bucketRegexp.MatchString(bucketName) {
+ return errors.New("invalid bucket name")
}
- resp := newBucketResponse(bucketName)
- w.WriteHeader(http.StatusOK)
- encoder.Encode(resp)
+ return nil
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go
new file mode 100644
index 0000000000000..a57d154279a5e
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go
@@ -0,0 +1,30 @@
+package fakestorage
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+func (s *Server) updateServerConfig(r *http.Request) jsonResponse {
+ var configOptions struct {
+ ExternalUrl string `json:"externalUrl,omitempty"`
+ PublicHost string `json:"publicHost,omitempty"`
+ }
+ err := json.NewDecoder(r.Body).Decode(&configOptions)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Update server config payload can not be parsed.",
+ }
+ }
+
+ if configOptions.ExternalUrl != "" {
+ s.externalURL = configOptions.ExternalUrl
+ }
+
+ if configOptions.PublicHost != "" {
+ s.publicHost = configOptions.PublicHost
+ }
+
+ return jsonResponse{status: http.StatusOK}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go
new file mode 100644
index 0000000000000..99e8ce7d4ccab
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go
@@ -0,0 +1,84 @@
+package fakestorage
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "os"
+ "syscall"
+
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+type jsonResponse struct {
+ status int
+ header http.Header
+ data any
+ errorMessage string
+}
+
+type jsonHandler = func(r *http.Request) jsonResponse
+
+func jsonToHTTPHandler(h jsonHandler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ resp := h(r)
+ w.Header().Set("Content-Type", "application/json")
+ for name, values := range resp.header {
+ for _, value := range values {
+ w.Header().Add(name, value)
+ }
+ }
+
+ status := resp.getStatus()
+ var data any
+ if status > 399 {
+ data = newErrorResponse(status, resp.getErrorMessage(status), resp.getErrorList(status))
+ } else {
+ data = resp.data
+ }
+
+ w.WriteHeader(status)
+ json.NewEncoder(w).Encode(data)
+ }
+}
+
+func (r *jsonResponse) getStatus() int {
+ if r.status > 0 {
+ return r.status
+ }
+ if r.errorMessage != "" {
+ return http.StatusInternalServerError
+ }
+ return http.StatusOK
+}
+
+func (r *jsonResponse) getErrorMessage(status int) string {
+ if r.errorMessage != "" {
+ return r.errorMessage
+ }
+ return http.StatusText(status)
+}
+
+func (r *jsonResponse) getErrorList(status int) []apiError {
+ if status == http.StatusOK {
+ return nil
+ } else {
+ return []apiError{{
+ Domain: "global",
+ Reason: http.StatusText(status),
+ Message: r.getErrorMessage(status),
+ }}
+ }
+}
+
+func errToJsonResponse(err error) jsonResponse {
+ status := 0
+ var pathError *os.PathError
+ if errors.As(err, &pathError) && pathError.Err == syscall.ENAMETOOLONG {
+ status = http.StatusBadRequest
+ }
+ if err == backend.PreConditionFailed {
+ status = http.StatusPreconditionFailed
+ }
+ return jsonResponse{errorMessage: err.Error(), status: status}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
index afaa2efeac76a..b228c787ae682 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
@@ -7,16 +7,14 @@ package fakestorage
import (
"net/http"
"net/http/httptest"
-
- "github.com/gorilla/mux"
)
type muxTransport struct {
- router *mux.Router
+ handler http.Handler
}
func (t *muxTransport) RoundTrip(r *http.Request) (*http.Response, error) {
w := httptest.NewRecorder()
- t.router.ServeHTTP(w, r)
+ t.handler.ServeHTTP(w, r)
return w.Result(), nil
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
index bc1d472f36e30..b229a452331e6 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
@@ -5,84 +5,357 @@
package fakestorage
import (
+ "bytes"
+ "compress/gzip"
"encoding/json"
+ "encoding/xml"
+ "errors"
"fmt"
+ "io"
"net/http"
+ "slices"
"sort"
"strconv"
"strings"
+ "time"
+ "cloud.google.com/go/storage"
"github.com/fsouza/fake-gcs-server/internal/backend"
+ "github.com/fsouza/fake-gcs-server/internal/notification"
"github.com/gorilla/mux"
)
-// Object represents the object that is stored within the fake server.
-type Object struct {
- BucketName string `json:"-"`
- Name string `json:"name"`
- Content []byte `json:"-"`
+var errInvalidGeneration = errors.New("invalid generation ID")
+
+// ObjectAttrs returns only the meta-data about an object without its contents.
+type ObjectAttrs struct {
+ BucketName string
+ Name string
+ Size int64
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
// Crc32c checksum of Content. calculated by server when it's upload methods are used.
- Crc32c string `json:"crc32c,omitempty"`
- Md5Hash string `json:"md5hash,omitempty"`
+ Crc32c string
+ Md5Hash string
+ Etag string
+ ACL []storage.ACLRule
+ // Dates and generation can be manually injected, so you can do assertions on them,
+ // or let us fill these fields for you
+ Created time.Time
+ Updated time.Time
+ Deleted time.Time
+ CustomTime time.Time
+ Generation int64
+ Metadata map[string]string
}
-func (o *Object) id() string {
+func (o *ObjectAttrs) id() string {
return o.BucketName + "/" + o.Name
}
-type objectList []Object
+type jsonObject struct {
+ BucketName string `json:"bucket"`
+ Name string `json:"name"`
+ Size int64 `json:"size,string"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding"`
+ ContentDisposition string `json:"contentDisposition"`
+ Crc32c string `json:"crc32c,omitempty"`
+ Md5Hash string `json:"md5Hash,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ ACL []aclRule `json:"acl,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Updated time.Time `json:"updated,omitempty"`
+ Deleted time.Time `json:"deleted,omitempty"`
+ CustomTime time.Time `json:"customTime,omitempty"`
+ Generation int64 `json:"generation,omitempty,string"`
+ Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// MarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule
+func (o ObjectAttrs) MarshalJSON() ([]byte, error) {
+ temp := jsonObject{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ Size: o.Size,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ Created: o.Created,
+ Updated: o.Updated,
+ Deleted: o.Deleted,
+ CustomTime: o.CustomTime,
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ }
+ temp.ACL = make([]aclRule, len(o.ACL))
+ for i, ACL := range o.ACL {
+ temp.ACL[i] = aclRule(ACL)
+ }
+ return json.Marshal(temp)
+}
+
+// UnmarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule
+func (o *ObjectAttrs) UnmarshalJSON(data []byte) error {
+ var temp jsonObject
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ o.BucketName = temp.BucketName
+ o.Name = temp.Name
+ o.ContentType = temp.ContentType
+ o.ContentEncoding = temp.ContentEncoding
+ o.ContentDisposition = temp.ContentDisposition
+ o.Size = temp.Size
+ o.Crc32c = temp.Crc32c
+ o.Md5Hash = temp.Md5Hash
+ o.Etag = temp.Etag
+ o.Created = temp.Created
+ o.Updated = temp.Updated
+ o.Deleted = temp.Deleted
+ o.Generation = temp.Generation
+ o.Metadata = temp.Metadata
+ o.CustomTime = temp.CustomTime
+ o.ACL = make([]storage.ACLRule, len(temp.ACL))
+ for i, ACL := range temp.ACL {
+ o.ACL[i] = storage.ACLRule(ACL)
+ }
+
+ return nil
+}
+
+// Object represents an object that is stored within the fake server. The
+// content of this type is stored is buffered, i.e. it's stored in memory.
+// Use StreamingObject to stream the content from a reader, e.g a file.
+type Object struct {
+ ObjectAttrs
+ Content []byte `json:"-"`
+}
+
+type noopSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n noopSeekCloser) Close() error {
+ return nil
+}
+
+func (o Object) StreamingObject() StreamingObject {
+ return StreamingObject{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: noopSeekCloser{bytes.NewReader(o.Content)},
+ }
+}
+
+// StreamingObject is the streaming version of Object.
+type StreamingObject struct {
+ ObjectAttrs
+ Content io.ReadSeekCloser `json:"-"`
+}
+
+func (o *StreamingObject) Close() error {
+ if o != nil && o.Content != nil {
+ return o.Content.Close()
+ }
+ return nil
+}
+
+func (o *StreamingObject) BufferedObject() (Object, error) {
+ data, err := io.ReadAll(o.Content)
+ return Object{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: data,
+ }, err
+}
+
+// ACLRule is an alias of storage.ACLRule to have custom JSON marshal
+type aclRule storage.ACLRule
+
+// ProjectTeam is an alias of storage.ProjectTeam to have custom JSON marshal
+type projectTeam storage.ProjectTeam
+
+// MarshalJSON for ACLRule to customize field names
+func (acl aclRule) MarshalJSON() ([]byte, error) {
+ temp := struct {
+ Entity storage.ACLEntity `json:"entity"`
+ EntityID string `json:"entityId"`
+ Role storage.ACLRole `json:"role"`
+ Domain string `json:"domain"`
+ Email string `json:"email"`
+ ProjectTeam *projectTeam `json:"projectTeam"`
+ }{
+ Entity: acl.Entity,
+ EntityID: acl.EntityID,
+ Role: acl.Role,
+ Domain: acl.Domain,
+ Email: acl.Email,
+ ProjectTeam: (*projectTeam)(acl.ProjectTeam),
+ }
+ return json.Marshal(temp)
+}
-func (o objectList) Len() int {
- return len(o)
+// UnmarshalJSON for ACLRule to customize field names
+func (acl *aclRule) UnmarshalJSON(data []byte) error {
+ temp := struct {
+ Entity storage.ACLEntity `json:"entity"`
+ EntityID string `json:"entityId"`
+ Role storage.ACLRole `json:"role"`
+ Domain string `json:"domain"`
+ Email string `json:"email"`
+ ProjectTeam *projectTeam `json:"projectTeam"`
+ }{}
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ acl.Entity = temp.Entity
+ acl.EntityID = temp.EntityID
+ acl.Role = temp.Role
+ acl.Domain = temp.Domain
+ acl.Email = temp.Email
+ acl.ProjectTeam = (*storage.ProjectTeam)(temp.ProjectTeam)
+ return nil
}
-func (o objectList) Less(i int, j int) bool {
- return o[i].Name < o[j].Name
+// MarshalJSON for ProjectTeam to customize field names
+func (team projectTeam) MarshalJSON() ([]byte, error) {
+ temp := struct {
+ ProjectNumber string `json:"projectNumber"`
+ Team string `json:"team"`
+ }{
+ ProjectNumber: team.ProjectNumber,
+ Team: team.Team,
+ }
+ return json.Marshal(temp)
}
-func (o *objectList) Swap(i int, j int) {
- d := *o
- d[i], d[j] = d[j], d[i]
+// UnmarshalJSON for ProjectTeam to customize field names
+func (team *projectTeam) UnmarshalJSON(data []byte) error {
+ temp := struct {
+ ProjectNumber string `json:"projectNumber"`
+ Team string `json:"team"`
+ }{}
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ team.ProjectNumber = temp.ProjectNumber
+ team.Team = temp.Team
+ return nil
}
-// CreateObject stores the given object internally.
+// CreateObject is the non-streaming version of CreateObjectStreaming.
//
-// If the bucket within the object doesn't exist, it also creates it. If the
-// object already exists, it overrides the object.
+// In addition to streaming, CreateObjectStreaming returns an error instead of
+// panicking when an error occurs.
func (s *Server) CreateObject(obj Object) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- err := s.createObject(obj)
+ err := s.CreateObjectStreaming(obj.StreamingObject())
if err != nil {
panic(err)
}
}
-func (s *Server) createObject(obj Object) error {
- return s.backend.CreateObject(toBackendObjects([]Object{obj})[0])
+// CreateObjectStreaming stores the given object internally.
+//
+// If the bucket within the object doesn't exist, it also creates it. If the
+// object already exists, it overwrites the object.
+func (s *Server) CreateObjectStreaming(obj StreamingObject) error {
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return err
+ }
+ obj.Close()
+ return nil
+}
+
+func (s *Server) createObject(obj StreamingObject, conditions backend.Conditions) (StreamingObject, error) {
+ oldBackendObj, err := s.backend.GetObject(obj.BucketName, obj.Name)
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer oldBackendObj.Close() //lint:ignore SA5001 // see above
+
+ prevVersionExisted := err == nil
+
+ // The caller is responsible for closing the created object.
+ newBackendObj, err := s.backend.CreateObject(toBackendObjects([]StreamingObject{obj})[0], conditions)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ var newObjEventAttr map[string]string
+ if prevVersionExisted {
+ newObjEventAttr = map[string]string{
+ "overwroteGeneration": strconv.FormatInt(oldBackendObj.Generation, 10),
+ }
+
+ oldObjEventAttr := map[string]string{
+ "overwrittenByGeneration": strconv.FormatInt(newBackendObj.Generation, 10),
+ }
+
+ bucket, _ := s.backend.GetBucket(obj.BucketName)
+ if bucket.VersioningEnabled {
+ s.eventManager.Trigger(&oldBackendObj, notification.EventArchive, oldObjEventAttr)
+ } else {
+ s.eventManager.Trigger(&oldBackendObj, notification.EventDelete, oldObjEventAttr)
+ }
+ }
+
+ newObj := fromBackendObjects([]backend.StreamingObject{newBackendObj})[0]
+ s.eventManager.Trigger(&newBackendObj, notification.EventFinalize, newObjEventAttr)
+ return newObj, nil
+}
+
+type ListOptions struct {
+ Prefix string
+ Delimiter string
+ Versions bool
+ StartOffset string
+ EndOffset string
+ IncludeTrailingDelimiter bool
}
// ListObjects returns a sorted list of objects that match the given criteria,
// or an error if the bucket doesn't exist.
-func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []string, error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- backendObjects, err := s.backend.ListObjects(bucketName)
+//
+// Deprecated: use ListObjectsWithOptions.
+func (s *Server) ListObjects(bucketName, prefix, delimiter string, versions bool) ([]ObjectAttrs, []string, error) {
+ return s.ListObjectsWithOptions(bucketName, ListOptions{
+ Prefix: prefix,
+ Delimiter: delimiter,
+ Versions: versions,
+ })
+}
+
+func (s *Server) ListObjectsWithOptions(bucketName string, options ListOptions) ([]ObjectAttrs, []string, error) {
+ backendObjects, err := s.backend.ListObjects(bucketName, options.Prefix, options.Versions)
if err != nil {
return nil, nil, err
}
- objects := fromBackendObjects(backendObjects)
- olist := objectList(objects)
- sort.Sort(&olist)
- var respObjects []Object
+ objects := fromBackendObjectsAttrs(backendObjects)
+ slices.SortFunc(objects, func(left, right ObjectAttrs) int {
+ return strings.Compare(left.Name, right.Name)
+ })
+ var respObjects []ObjectAttrs
prefixes := make(map[string]bool)
- for _, obj := range olist {
- if strings.HasPrefix(obj.Name, prefix) {
- objName := strings.Replace(obj.Name, prefix, "", 1)
- delimPos := strings.Index(objName, delimiter)
- if delimiter != "" && delimPos > -1 {
- prefixes[obj.Name[:len(prefix)+delimPos+1]] = true
- } else {
+ for _, obj := range objects {
+ if !strings.HasPrefix(obj.Name, options.Prefix) {
+ continue
+ }
+ objName := strings.Replace(obj.Name, options.Prefix, "", 1)
+ delimPos := strings.Index(objName, options.Delimiter)
+ if options.Delimiter != "" && delimPos > -1 {
+ prefix := obj.Name[:len(options.Prefix)+delimPos+1]
+ if isInOffset(prefix, options.StartOffset, options.EndOffset) {
+ prefixes[prefix] = true
+ }
+ if options.IncludeTrailingDelimiter && obj.Name == prefix {
+ respObjects = append(respObjects, obj)
+ }
+ } else {
+ if isInOffset(obj.Name, options.StartOffset, options.EndOffset) {
respObjects = append(respObjects, obj)
}
}
@@ -95,143 +368,818 @@ func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []
return respObjects, respPrefixes, nil
}
-func toBackendObjects(objects []Object) []backend.Object {
- backendObjects := []backend.Object{}
+func isInOffset(name, startOffset, endOffset string) bool {
+ if endOffset != "" && startOffset != "" {
+ return strings.Compare(name, endOffset) < 0 && strings.Compare(name, startOffset) >= 0
+ } else if endOffset != "" {
+ return strings.Compare(name, endOffset) < 0
+ } else if startOffset != "" {
+ return strings.Compare(name, startOffset) >= 0
+ } else {
+ return true
+ }
+}
+
+func getCurrentIfZero(date time.Time) time.Time {
+ if date.IsZero() {
+ return time.Now()
+ }
+ return date
+}
+
+func toBackendObjects(objects []StreamingObject) []backend.StreamingObject {
+ backendObjects := make([]backend.StreamingObject, 0, len(objects))
for _, o := range objects {
- backendObjects = append(backendObjects, backend.Object{
- BucketName: o.BucketName,
- Name: o.Name,
- Content: o.Content,
- Crc32c: o.Crc32c,
- Md5Hash: o.Md5Hash,
+ backendObjects = append(backendObjects, backend.StreamingObject{
+ ObjectAttrs: backend.ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ ACL: o.ACL,
+ Created: getCurrentIfZero(o.Created).Format(timestampFormat),
+ Deleted: o.Deleted.Format(timestampFormat),
+ Updated: getCurrentIfZero(o.Updated).Format(timestampFormat),
+ CustomTime: o.CustomTime.Format(timestampFormat),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ },
+ Content: o.Content,
})
}
return backendObjects
}
-func fromBackendObjects(objects []backend.Object) []Object {
- backendObjects := []Object{}
+func bufferedObjectsToBackendObjects(objects []Object) []backend.StreamingObject {
+ backendObjects := make([]backend.StreamingObject, 0, len(objects))
+ for _, bufferedObject := range objects {
+ o := bufferedObject.StreamingObject()
+ backendObjects = append(backendObjects, backend.StreamingObject{
+ ObjectAttrs: backend.ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ ACL: o.ACL,
+ Created: getCurrentIfZero(o.Created).Format(timestampFormat),
+ Deleted: o.Deleted.Format(timestampFormat),
+ Updated: getCurrentIfZero(o.Updated).Format(timestampFormat),
+ CustomTime: o.CustomTime.Format(timestampFormat),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Size: o.Size,
+ Etag: o.Etag,
+ },
+ Content: o.Content,
+ })
+ }
+ return backendObjects
+}
+
+func fromBackendObjects(objects []backend.StreamingObject) []StreamingObject {
+ backendObjects := make([]StreamingObject, 0, len(objects))
for _, o := range objects {
- backendObjects = append(backendObjects, Object{
- BucketName: o.BucketName,
- Name: o.Name,
- Content: o.Content,
- Crc32c: o.Crc32c,
- Md5Hash: o.Md5Hash,
+ backendObjects = append(backendObjects, StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ Size: o.Size,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ ACL: o.ACL,
+ Created: convertTimeWithoutError(o.Created),
+ Deleted: convertTimeWithoutError(o.Deleted),
+ Updated: convertTimeWithoutError(o.Updated),
+ CustomTime: convertTimeWithoutError(o.CustomTime),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ },
+ Content: o.Content,
})
}
return backendObjects
}
-// GetObject returns the object with the given name in the given bucket, or an
-// error if the object doesn't exist.
+func fromBackendObjectsAttrs(objectAttrs []backend.ObjectAttrs) []ObjectAttrs {
+ oattrs := make([]ObjectAttrs, 0, len(objectAttrs))
+ for _, o := range objectAttrs {
+ oattrs = append(oattrs, ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ Size: o.Size,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ ACL: o.ACL,
+ Created: convertTimeWithoutError(o.Created),
+ Deleted: convertTimeWithoutError(o.Deleted),
+ Updated: convertTimeWithoutError(o.Updated),
+ CustomTime: convertTimeWithoutError(o.CustomTime),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ })
+ }
+ return oattrs
+}
+
+func convertTimeWithoutError(t string) time.Time {
+ r, _ := time.Parse(timestampFormat, t)
+ return r
+}
+
+// GetObject is the non-streaming version of GetObjectStreaming.
func (s *Server) GetObject(bucketName, objectName string) (Object, error) {
+ streamingObject, err := s.GetObjectStreaming(bucketName, objectName)
+ if err != nil {
+ return Object{}, err
+ }
+ return streamingObject.BufferedObject()
+}
+
+// GetObjectStreaming returns the object with the given name in the given
+// bucket, or an error if the object doesn't exist.
+func (s *Server) GetObjectStreaming(bucketName, objectName string) (StreamingObject, error) {
backendObj, err := s.backend.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
+ return obj, nil
+}
+
+// GetObjectWithGeneration is the non-streaming version of
+// GetObjectWithGenerationStreaming.
+func (s *Server) GetObjectWithGeneration(bucketName, objectName string, generation int64) (Object, error) {
+ streamingObject, err := s.GetObjectWithGenerationStreaming(bucketName, objectName, generation)
if err != nil {
return Object{}, err
}
- obj := fromBackendObjects([]backend.Object{backendObj})[0]
+ return streamingObject.BufferedObject()
+}
+
+// GetObjectWithGenerationStreaming returns the object with the given name and
+// given generation ID in the given bucket, or an error if the object doesn't
+// exist.
+//
+// If versioning is enabled, archived versions are considered.
+func (s *Server) GetObjectWithGenerationStreaming(bucketName, objectName string, generation int64) (StreamingObject, error) {
+ backendObj, err := s.backend.GetObjectWithGeneration(bucketName, objectName, generation)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
return obj, nil
}
-func (s *Server) listObjects(w http.ResponseWriter, r *http.Request) {
- bucketName := mux.Vars(r)["bucketName"]
- prefix := r.URL.Query().Get("prefix")
- delimiter := r.URL.Query().Get("delimiter")
- objs, prefixes, err := s.ListObjects(bucketName, prefix, delimiter)
- encoder := json.NewEncoder(w)
+func (s *Server) objectWithGenerationOnValidGeneration(bucketName, objectName, generationStr string) (StreamingObject, error) {
+ generation, err := strconv.ParseInt(generationStr, 10, 64)
+ if err != nil && generationStr != "" {
+ return StreamingObject{}, errInvalidGeneration
+ } else if generation > 0 {
+ return s.GetObjectWithGenerationStreaming(bucketName, objectName, generation)
+ }
+ return s.GetObjectStreaming(bucketName, objectName)
+}
+
+func (s *Server) listObjects(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ objs, prefixes, err := s.ListObjectsWithOptions(bucketName, ListOptions{
+ Prefix: r.URL.Query().Get("prefix"),
+ Delimiter: r.URL.Query().Get("delimiter"),
+ Versions: r.URL.Query().Get("versions") == "true",
+ StartOffset: r.URL.Query().Get("startOffset"),
+ EndOffset: r.URL.Query().Get("endOffset"),
+ IncludeTrailingDelimiter: r.URL.Query().Get("includeTrailingDelimiter") == "true",
+ })
if err != nil {
- w.WriteHeader(http.StatusNotFound)
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- encoder.Encode(errResp)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
- encoder.Encode(newListObjectsResponse(objs, prefixes))
+ return jsonResponse{data: newListObjectsResponse(objs, prefixes, s.externalURL)}
}
-func (s *Server) getObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- encoder := json.NewEncoder(w)
- obj, err := s.GetObject(vars["bucketName"], vars["objectName"])
+func (s *Server) xmlListObjects(r *http.Request) xmlResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ opts := ListOptions{
+ Prefix: r.URL.Query().Get("prefix"),
+ Delimiter: r.URL.Query().Get("delimiter"),
+ Versions: r.URL.Query().Get("versions") == "true",
+ }
+
+ objs, prefixes, err := s.ListObjectsWithOptions(bucketName, opts)
if err != nil {
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- w.WriteHeader(http.StatusNotFound)
- encoder.Encode(errResp)
+ return xmlResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: err.Error(),
+ }
+ }
+
+ result := ListBucketResult{
+ Name: bucketName,
+ Delimiter: opts.Delimiter,
+ Prefix: opts.Prefix,
+ KeyCount: len(objs),
+ }
+
+ if opts.Delimiter != "" {
+ for _, prefix := range prefixes {
+ result.CommonPrefixes = append(result.CommonPrefixes, CommonPrefix{Prefix: prefix})
+ }
+ }
+
+ for _, obj := range objs {
+ result.Contents = append(result.Contents, Contents{
+ Key: obj.Name,
+ Generation: obj.Generation,
+ Size: obj.Size,
+ LastModified: obj.Updated.Format(time.RFC3339),
+ ETag: ETag{Value: obj.Etag},
+ })
+ }
+
+ raw, err := xml.Marshal(result)
+ if err != nil {
+ return xmlResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: err.Error(),
+ }
+ }
+
+ return xmlResponse{
+ status: http.StatusOK,
+ data: []byte(xml.Header + string(raw)),
+ }
+}
+
+func (s *Server) getObject(w http.ResponseWriter, r *http.Request) {
+ if alt := r.URL.Query().Get("alt"); alt == "media" || r.Method == http.MethodHead {
+ s.downloadObject(w, r)
return
}
- w.Header().Set("Accept-Ranges", "bytes")
- encoder.Encode(newObjectResponse(obj))
+
+ handler := jsonToHTTPHandler(func(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ projection := storage.ProjectionNoACL
+ if r.URL.Query().Has("projection") {
+ switch value := strings.ToLower(r.URL.Query().Get("projection")); value {
+ case "full":
+ projection = storage.ProjectionFull
+ case "noacl":
+ projection = storage.ProjectionNoACL
+ default:
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: fmt.Sprintf("invalid projection: %q", value),
+ }
+ }
+ }
+
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err != nil {
+ statusCode := http.StatusNotFound
+ var errMessage string
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ errMessage = err.Error()
+ }
+ return jsonResponse{
+ status: statusCode,
+ errorMessage: errMessage,
+ }
+ }
+ header := make(http.Header)
+ header.Set("Accept-Ranges", "bytes")
+ return jsonResponse{
+ header: header,
+ data: newProjectedObjectResponse(obj.ObjectAttrs, s.externalURL, projection),
+ }
+ })
+
+ handler(w, r)
}
-func (s *Server) deleteObject(w http.ResponseWriter, r *http.Request) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- vars := mux.Vars(r)
- err := s.backend.DeleteObject(vars["bucketName"], vars["objectName"])
+func (s *Server) deleteObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err == nil {
+ err = s.backend.DeleteObject(vars["bucketName"], vars["objectName"])
+ }
if err != nil {
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- w.WriteHeader(http.StatusNotFound)
- json.NewEncoder(w).Encode(errResp)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
- w.WriteHeader(http.StatusOK)
+ bucket, _ := s.backend.GetBucket(obj.BucketName)
+ backendObj := toBackendObjects([]StreamingObject{obj})[0]
+ if bucket.VersioningEnabled {
+ s.eventManager.Trigger(&backendObj, notification.EventArchive, nil)
+ } else {
+ s.eventManager.Trigger(&backendObj, notification.EventDelete, nil)
+ }
+ return jsonResponse{}
}
-func (s *Server) rewriteObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- obj, err := s.GetObject(vars["sourceBucket"], vars["sourceObject"])
+func (s *Server) listObjectACL(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
if err != nil {
- http.Error(w, "not found", http.StatusNotFound)
- return
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ defer obj.Close()
+
+ return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)}
+}
+
+func (s *Server) setObjectACL(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
+ if err != nil {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ defer obj.Close()
+
+ var data struct {
+ Entity string
+ Role string
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&data); err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: err.Error(),
+ }
+ }
+
+ entity := storage.ACLEntity(data.Entity)
+ role := storage.ACLRole(data.Role)
+ obj.ACL = []storage.ACLRule{{
+ Entity: entity,
+ Role: role,
+ }}
+
+ obj, err = s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer obj.Close()
+
+ return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)}
+}
+
+func (s *Server) rewriteObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["sourceBucket"], vars["sourceObject"], r.FormValue("sourceGeneration"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err != nil {
+ statusCode := http.StatusNotFound
+ var errMessage string
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ errMessage = err.Error()
+ }
+ return jsonResponse{errorMessage: errMessage, status: statusCode}
+ }
+
+ var metadata multipartMetadata
+ err = json.NewDecoder(r.Body).Decode(&metadata)
+ if err != nil && err != io.EOF { // The body is optional
+ return jsonResponse{errorMessage: "Invalid metadata", status: http.StatusBadRequest}
+ }
+
+ // Only supplied metadata overwrites the new object's metdata
+ if len(metadata.Metadata) == 0 {
+ metadata.Metadata = obj.Metadata
+ }
+ if metadata.ContentType == "" {
+ metadata.ContentType = obj.ContentType
+ }
+ if metadata.ContentEncoding == "" {
+ metadata.ContentEncoding = obj.ContentEncoding
+ }
+ if metadata.ContentDisposition == "" {
+ metadata.ContentDisposition = obj.ContentDisposition
}
+
dstBucket := vars["destinationBucket"]
- newObject := Object{
- BucketName: dstBucket,
- Name: vars["destinationObject"],
- Content: append([]byte(nil), obj.Content...),
- Crc32c: obj.Crc32c,
- Md5Hash: obj.Md5Hash,
+ newObject := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: dstBucket,
+ Name: vars["destinationObject"],
+ ACL: obj.ACL,
+ ContentType: metadata.ContentType,
+ ContentEncoding: metadata.ContentEncoding,
+ ContentDisposition: metadata.ContentDisposition,
+ Metadata: metadata.Metadata,
+ },
+ Content: obj.Content,
}
- s.CreateObject(newObject)
- w.Header().Set("Content-Type", "application/json")
- json.NewEncoder(w).Encode(newObjectRewriteResponse(newObject))
+
+ created, err := s.createObject(newObject, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer created.Close()
+
+ if vars["copyType"] == "copyTo" {
+ return jsonResponse{data: newObjectResponse(created.ObjectAttrs, s.externalURL)}
+ }
+ return jsonResponse{data: newObjectRewriteResponse(created.ObjectAttrs, s.externalURL)}
}
func (s *Server) downloadObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- obj, err := s.GetObject(vars["bucketName"], vars["objectName"])
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
if err != nil {
- http.Error(w, "not found", http.StatusNotFound)
+ statusCode := http.StatusNotFound
+ message := http.StatusText(statusCode)
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ message = err.Error()
+ }
+ http.Error(w, message, statusCode)
return
}
+
+ var content io.Reader
+ content = obj.Content
status := http.StatusOK
- start, end, content := s.handleRange(obj, r)
- if len(content) != len(obj.Content) {
+
+ transcoded := false
+ ranged := false
+ start := int64(0)
+ lastByte := int64(0)
+ satisfiable := true
+ contentLength := int64(0)
+
+ handledTranscoding := func() bool {
+ // This should also be false if the Cache-Control metadata field == "no-transform",
+ // but we don't currently support that field.
+ // See https://cloud.google.com/storage/docs/transcoding
+
+ if obj.ContentEncoding == "gzip" && !strings.Contains(r.Header.Get("accept-encoding"), "gzip") {
+ // GCS will transparently decompress gzipped content, see
+ // https://cloud.google.com/storage/docs/transcoding
+ // In this case, any Range header is ignored and the full content is returned.
+
+ // If the content is not a valid gzip file, ignore errors and continue
+ // without transcoding. Otherwise, return decompressed content.
+ gzipReader, err := gzip.NewReader(content)
+ if err == nil {
+ rawContent, err := io.ReadAll(gzipReader)
+ if err == nil {
+ transcoded = true
+ content = bytes.NewReader(rawContent)
+ contentLength = int64(len(rawContent))
+ obj.Size = contentLength
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ if !handledTranscoding() {
+ ranged, start, lastByte, satisfiable = s.handleRange(obj, r)
+ contentLength = lastByte - start + 1
+ }
+
+ if ranged && satisfiable {
+ _, err = obj.Content.Seek(start, io.SeekStart)
+ if err != nil {
+ http.Error(w, "could not seek", http.StatusInternalServerError)
+ return
+ }
+ content = io.LimitReader(obj.Content, contentLength)
status = http.StatusPartialContent
- w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.Content)))
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, lastByte, obj.Size))
}
w.Header().Set("Accept-Ranges", "bytes")
- w.Header().Set("Content-Length", strconv.Itoa(len(content)))
+ w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ w.Header().Set("X-Goog-Generation", strconv.FormatInt(obj.Generation, 10))
+ w.Header().Set("X-Goog-Hash", fmt.Sprintf("crc32c=%s,md5=%s", obj.Crc32c, obj.Md5Hash))
+ w.Header().Set("Last-Modified", obj.Updated.Format(http.TimeFormat))
+ w.Header().Set("ETag", fmt.Sprintf("%q", obj.Etag))
+ for name, value := range obj.Metadata {
+ w.Header().Set("X-Goog-Meta-"+name, value)
+ }
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+
+ if ranged && !satisfiable {
+ status = http.StatusRequestedRangeNotSatisfiable
+ content = bytes.NewReader([]byte(fmt.Sprintf(``+
+ `InvalidRange
`+
+ `The requested range cannot be satisfied. `+
+ `%s `, r.Header.Get("Range"))))
+ w.Header().Set(contentTypeHeader, "application/xml; charset=UTF-8")
+ } else {
+ if obj.ContentType != "" {
+ w.Header().Set(contentTypeHeader, obj.ContentType)
+ }
+ if obj.CacheControl != "" {
+ w.Header().Set(cacheControlHeader, obj.CacheControl)
+ }
+ // If content was transcoded, the underlying encoding was removed so we shouldn't report it.
+ if obj.ContentEncoding != "" && !transcoded {
+ w.Header().Set("Content-Encoding", obj.ContentEncoding)
+ }
+ if obj.ContentDisposition != "" {
+ w.Header().Set("Content-Disposition", obj.ContentDisposition)
+ }
+ // X-Goog-Stored-Content-Encoding must be set to the original encoding,
+ // defaulting to "identity" if no encoding was set.
+ storedContentEncoding := "identity"
+ if obj.ContentEncoding != "" {
+ storedContentEncoding = obj.ContentEncoding
+ }
+ w.Header().Set("X-Goog-Stored-Content-Encoding", storedContentEncoding)
+ }
+
w.WriteHeader(status)
if r.Method == http.MethodGet {
- w.Write(content)
+ io.Copy(w, content)
}
}
-func (s *Server) handleRange(obj Object, r *http.Request) (start, end int, content []byte) {
- if reqRange := r.Header.Get("Range"); reqRange != "" {
- parts := strings.SplitN(reqRange, "=", 2)
- if len(parts) == 2 && parts[0] == "bytes" {
- rangeParts := strings.SplitN(parts[1], "-", 2)
- if len(rangeParts) == 2 {
- start, _ = strconv.Atoi(rangeParts[0])
- end, _ = strconv.Atoi(rangeParts[1])
- if end < 1 {
- end = len(obj.Content)
- }
- return start, end, obj.Content[start:end]
+func (s *Server) handleRange(obj StreamingObject, r *http.Request) (ranged bool, start int64, lastByte int64, satisfiable bool) {
+ start, end, err := parseRange(r.Header.Get("Range"), obj.Size)
+ if err != nil {
+ // If the range isn't valid, GCS returns all content.
+ return false, 0, obj.Size - 1, false
+ }
+ // GCS is pretty flexible when it comes to invalid ranges. A 416 http
+ // response is only returned when the range start is beyond the length of
+ // the content. Otherwise, the range is ignored.
+ switch {
+ // Invalid start. Return 416 and NO content.
+ // Examples:
+ // Length: 40, Range: bytes=50-60
+ // Length: 40, Range: bytes=50-
+ case start >= obj.Size:
+ // This IS a ranged request, but it ISN'T satisfiable.
+ return true, 0, 0, false
+ // Negative range, ignore range and return all content.
+ // Examples:
+ // Length: 40, Range: bytes=30-20
+ case end < start:
+ return false, 0, obj.Size - 1, false
+ // Return range. Clamp start and end.
+ // Examples:
+ // Length: 40, Range: bytes=-100
+ // Length: 40, Range: bytes=0-100
+ default:
+ if start < 0 {
+ start = 0
+ }
+ if end >= obj.Size {
+ end = obj.Size - 1
+ }
+ return true, start, end, true
+ }
+}
+
+// parseRange parses the range header and returns the corresponding start and
+// end indices in the content. The end index is inclusive. This function
+// doesn't validate that the start and end indices fall within the content
+// bounds. The content length is only used to handle "suffix length" and
+// range-to-end ranges.
+func parseRange(rangeHeaderValue string, contentLength int64) (start int64, end int64, err error) {
+ // For information about the range header, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range
+ // https://httpwg.org/specs/rfc7233.html#header.range
+ // https://httpwg.org/specs/rfc7233.html#byte.ranges
+ // https://httpwg.org/specs/rfc7233.html#status.416
+ //
+ // =
+ //
+ // The following ranges are parsed:
+ // "bytes=40-50" (range with given start and end)
+ // "bytes=40-" (range to end of content)
+ // "bytes=-40" (suffix length, offset from end of string)
+ //
+ // The unit MUST be "bytes".
+ parts := strings.SplitN(rangeHeaderValue, "=", 2)
+ if len(parts) != 2 {
+ return 0, 0, fmt.Errorf("expecting `=` in range header, got: %s", rangeHeaderValue)
+ }
+ if parts[0] != "bytes" {
+ return 0, 0, fmt.Errorf("invalid range unit, expecting `bytes`, got: %s", parts[0])
+ }
+ rangeSpec := parts[1]
+ if len(rangeSpec) == 0 {
+ return 0, 0, errors.New("empty range")
+ }
+ if rangeSpec[0] == '-' {
+ offsetFromEnd, err := strconv.ParseInt(rangeSpec, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid suffix length, got: %s", rangeSpec)
+ }
+ start = contentLength + offsetFromEnd
+ end = contentLength - 1
+ } else {
+ rangeParts := strings.SplitN(rangeSpec, "-", 2)
+ if len(rangeParts) != 2 {
+ return 0, 0, fmt.Errorf("only one range supported, got: %s", rangeSpec)
+ }
+ start, err = strconv.ParseInt(rangeParts[0], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid range start, got: %s", rangeParts[0])
+ }
+ if rangeParts[1] == "" {
+ end = contentLength - 1
+ } else {
+ end, err = strconv.ParseInt(rangeParts[1], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid range end, got: %s", rangeParts[1])
}
}
}
- return 0, 0, obj.Content
+ return start, end, nil
+}
+
+func (s *Server) patchObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ objectName := vars["objectName"]
+
+ type acls struct {
+ Entity string
+ Role string
+ }
+
+ var payload struct {
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ Metadata map[string]string `json:"metadata"`
+ CustomTime string
+ Acl []acls
+ }
+ err := json.NewDecoder(r.Body).Decode(&payload)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Metadata in the request couldn't decode",
+ }
+ }
+
+ var attrsToUpdate backend.ObjectAttrs
+
+ attrsToUpdate.ContentType = payload.ContentType
+ attrsToUpdate.ContentEncoding = payload.ContentEncoding
+ attrsToUpdate.ContentDisposition = payload.ContentDisposition
+ attrsToUpdate.Metadata = payload.Metadata
+ attrsToUpdate.CustomTime = payload.CustomTime
+
+ if len(payload.Acl) > 0 {
+ attrsToUpdate.ACL = []storage.ACLRule{}
+ for _, aclData := range payload.Acl {
+ newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)}
+ attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl)
+ }
+ }
+
+ backendObj, err := s.backend.PatchObject(bucketName, objectName, attrsToUpdate)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusNotFound,
+ errorMessage: "Object not found to be PATCHed",
+ }
+ }
+ defer backendObj.Close()
+
+ s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil)
+ return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]}
+}
+
+func (s *Server) updateObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ objectName := vars["objectName"]
+
+ type acls struct {
+ Entity string
+ Role string
+ }
+
+ var payload struct {
+ Metadata map[string]string `json:"metadata"`
+ ContentType string `json:"contentType"`
+ ContentDisposition string `json:"contentDisposition"`
+ CustomTime string
+ Acl []acls
+ }
+ err := json.NewDecoder(r.Body).Decode(&payload)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Metadata in the request couldn't decode",
+ }
+ }
+
+ var attrsToUpdate backend.ObjectAttrs
+
+ attrsToUpdate.Metadata = payload.Metadata
+ attrsToUpdate.CustomTime = payload.CustomTime
+ attrsToUpdate.ContentType = payload.ContentType
+ attrsToUpdate.ContentDisposition = payload.ContentDisposition
+ if len(payload.Acl) > 0 {
+ attrsToUpdate.ACL = []storage.ACLRule{}
+ for _, aclData := range payload.Acl {
+ newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)}
+ attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl)
+ }
+ }
+ backendObj, err := s.backend.UpdateObject(bucketName, objectName, attrsToUpdate)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusNotFound,
+ errorMessage: "Object not found to be updated",
+ }
+ }
+ defer backendObj.Close()
+
+ s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil)
+ return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]}
+}
+
+func (s *Server) composeObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ destinationObject := vars["destinationObject"]
+
+ var composeRequest struct {
+ SourceObjects []struct {
+ Name string
+ }
+ Destination struct {
+ Bucket string
+ ContentType string
+ ContentDisposition string
+ Metadata map[string]string
+ }
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ err := decoder.Decode(&composeRequest)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Error parsing request body",
+ }
+ }
+
+ const maxComposeObjects = 32
+ if len(composeRequest.SourceObjects) > maxComposeObjects {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: fmt.Sprintf("The number of source components provided (%d) exceeds the maximum (%d)", len(composeRequest.SourceObjects), maxComposeObjects),
+ }
+ }
+
+ sourceNames := make([]string, 0, len(composeRequest.SourceObjects))
+ for _, n := range composeRequest.SourceObjects {
+ sourceNames = append(sourceNames, n.Name)
+ }
+
+ backendObj, err := s.backend.ComposeObject(bucketName, sourceNames, destinationObject, composeRequest.Destination.Metadata, composeRequest.Destination.ContentType)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: "Error running compose",
+ }
+ }
+ defer backendObj.Close()
+
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
+
+ s.eventManager.Trigger(&backendObj, notification.EventFinalize, nil)
+
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
index 92164cafb1057..f40dcd3fe9dd7 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
@@ -4,73 +4,200 @@
package fakestorage
-import "sort"
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ "cloud.google.com/go/storage"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+const timestampFormat = "2006-01-02T15:04:05.999999Z07:00"
+
+func formatTime(t time.Time) string {
+ if t.IsZero() {
+ return ""
+ }
+ return t.Format(timestampFormat)
+}
type listResponse struct {
- Kind string `json:"kind"`
- Items []interface{} `json:"items"`
- Prefixes []string `json:"prefixes"`
+ Kind string `json:"kind"`
+ Items []any `json:"items,omitempty"`
+ Prefixes []string `json:"prefixes,omitempty"`
}
-func newListBucketsResponse(bucketNames []string) listResponse {
+func newListBucketsResponse(buckets []backend.Bucket, location string) listResponse {
resp := listResponse{
Kind: "storage#buckets",
- Items: make([]interface{}, len(bucketNames)),
+ Items: make([]any, len(buckets)),
}
- sort.Strings(bucketNames)
- for i, name := range bucketNames {
- resp.Items[i] = newBucketResponse(name)
+ for i, bucket := range buckets {
+ resp.Items[i] = newBucketResponse(bucket, location)
}
return resp
}
type bucketResponse struct {
- Kind string `json:"kind"`
- ID string `json:"id"`
- Name string `json:"name"`
+ Kind string `json:"kind"`
+ ID string `json:"id"`
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold"`
+ Name string `json:"name"`
+ Versioning *bucketVersioning `json:"versioning,omitempty"`
+ TimeCreated string `json:"timeCreated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ Location string `json:"location,omitempty"`
+ StorageClass string `json:"storageClass,omitempty"`
+ ProjectNumber string `json:"projectNumber"`
+ Metageneration string `json:"metageneration"`
+ Etag string `json:"etag"`
+ LocationType string `json:"locationType"`
}
-func newBucketResponse(bucketName string) bucketResponse {
+type bucketVersioning struct {
+ Enabled bool `json:"enabled"`
+}
+
+func newBucketResponse(bucket backend.Bucket, location string) bucketResponse {
return bucketResponse{
- Kind: "storage#bucket",
- ID: bucketName,
- Name: bucketName,
+ Kind: "storage#bucket",
+ ID: bucket.Name,
+ Name: bucket.Name,
+ DefaultEventBasedHold: bucket.DefaultEventBasedHold,
+ Versioning: &bucketVersioning{bucket.VersioningEnabled},
+ TimeCreated: formatTime(bucket.TimeCreated),
+ Updated: formatTime(bucket.TimeCreated), // not tracking update times yet, reporting `updated` = `timeCreated`
+ Location: location,
+ StorageClass: "STANDARD",
+ ProjectNumber: "0",
+ Metageneration: "1",
+ Etag: "RVRhZw==",
+ LocationType: "region",
}
}
-func newListObjectsResponse(objs []Object, prefixes []string) listResponse {
+func newListObjectsResponse(objs []ObjectAttrs, prefixes []string, externalURL string) listResponse {
resp := listResponse{
Kind: "storage#objects",
- Items: make([]interface{}, len(objs)),
+ Items: make([]any, len(objs)),
Prefixes: prefixes,
}
for i, obj := range objs {
- resp.Items[i] = newObjectResponse(obj)
+ resp.Items[i] = newObjectResponse(obj, externalURL)
}
return resp
}
+// objectAccessControl is copied from the Google SDK to avoid direct
+// dependency.
+type objectAccessControl struct {
+ Bucket string `json:"bucket,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Email string `json:"email,omitempty"`
+ Entity string `json:"entity,omitempty"`
+ EntityID string `json:"entityId,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ Generation int64 `json:"generation,omitempty,string"`
+ ID string `json:"id,omitempty"`
+ Kind string `json:"kind,omitempty"`
+ Object string `json:"object,omitempty"`
+ ProjectTeam struct {
+ ProjectNumber string `json:"projectNumber,omitempty"`
+ Team string `json:"team,omitempty"`
+ } `json:"projectTeam,omitempty"`
+ Role string `json:"role,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
type objectResponse struct {
- Kind string `json:"kind"`
- Name string `json:"name"`
- ID string `json:"id"`
- Bucket string `json:"bucket"`
- Size int64 `json:"size,string"`
- // Crc32c: CRC32c checksum, same as in google storage client code
- Crc32c string `json:"crc32c,omitempty"`
- Md5Hash string `json:"md5hash,omitempty"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ ID string `json:"id"`
+ Bucket string `json:"bucket"`
+ Size int64 `json:"size,string"`
+ ContentType string `json:"contentType,omitempty"`
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+ ContentDisposition string `json:"contentDisposition,omitempty"`
+ Crc32c string `json:"crc32c,omitempty"`
+ ACL []*objectAccessControl `json:"acl,omitempty"`
+ Md5Hash string `json:"md5Hash,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ StorageClass string `json:"storageClass"`
+ TimeCreated string `json:"timeCreated,omitempty"`
+ TimeDeleted string `json:"timeDeleted,omitempty"`
+ TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ Generation int64 `json:"generation,string"`
+ CustomTime string `json:"customTime,omitempty"`
+ Metadata map[string]string `json:"metadata,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+ MediaLink string `json:"mediaLink,omitempty"`
+ Metageneration string `json:"metageneration,omitempty"`
}
-func newObjectResponse(obj Object) objectResponse {
+func newProjectedObjectResponse(obj ObjectAttrs, externalURL string, projection storage.Projection) objectResponse {
+ objResponse := newObjectResponse(obj, externalURL)
+ if projection == storage.ProjectionNoACL {
+ objResponse.ACL = nil
+ }
+ return objResponse
+}
+
+func newObjectResponse(obj ObjectAttrs, externalURL string) objectResponse {
+ acl := getAccessControlsListFromObject(obj)
+
return objectResponse{
- Kind: "storage#object",
- ID: obj.id(),
- Bucket: obj.BucketName,
- Name: obj.Name,
- Size: int64(len(obj.Content)),
- Crc32c: obj.Crc32c,
- Md5Hash: obj.Md5Hash,
+ Kind: "storage#object",
+ ID: obj.id(),
+ Bucket: obj.BucketName,
+ Name: obj.Name,
+ Size: obj.Size,
+ ContentType: obj.ContentType,
+ ContentEncoding: obj.ContentEncoding,
+ ContentDisposition: obj.ContentDisposition,
+ Crc32c: obj.Crc32c,
+ Md5Hash: obj.Md5Hash,
+ Etag: obj.Etag,
+ ACL: acl,
+ StorageClass: "STANDARD",
+ Metadata: obj.Metadata,
+ TimeCreated: formatTime(obj.Created),
+ TimeDeleted: formatTime(obj.Deleted),
+ TimeStorageClassUpdated: formatTime(obj.Updated),
+ Updated: formatTime(obj.Updated),
+ CustomTime: formatTime(obj.CustomTime),
+ Generation: obj.Generation,
+ SelfLink: fmt.Sprintf("%s/storage/v1/b/%s/o/%s", externalURL, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)),
+ MediaLink: fmt.Sprintf("%s/download/storage/v1/b/%s/o/%s?alt=media", externalURL, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)),
+ Metageneration: "1",
+ }
+}
+
+type aclListResponse struct {
+ Items []*objectAccessControl `json:"items"`
+}
+
+func newACLListResponse(obj ObjectAttrs) aclListResponse {
+ if len(obj.ACL) == 0 {
+ return aclListResponse{}
+ }
+ return aclListResponse{Items: getAccessControlsListFromObject(obj)}
+}
+
+func getAccessControlsListFromObject(obj ObjectAttrs) []*objectAccessControl {
+ aclItems := make([]*objectAccessControl, len(obj.ACL))
+ for idx, aclRule := range obj.ACL {
+ aclItems[idx] = &objectAccessControl{
+ Bucket: obj.BucketName,
+ Entity: string(aclRule.Entity),
+ Object: obj.Name,
+ Role: string(aclRule.Role),
+ Etag: "RVRhZw==",
+ Kind: "storage#objectAccessControl",
+ }
}
+ return aclItems
}
type rewriteResponse struct {
@@ -82,14 +209,14 @@ type rewriteResponse struct {
Resource objectResponse `json:"resource"`
}
-func newObjectRewriteResponse(obj Object) rewriteResponse {
+func newObjectRewriteResponse(obj ObjectAttrs, externalURL string) rewriteResponse {
return rewriteResponse{
Kind: "storage#rewriteResponse",
- TotalBytesRewritten: int64(len(obj.Content)),
- ObjectSize: int64(len(obj.Content)),
+ TotalBytesRewritten: obj.Size,
+ ObjectSize: obj.Size,
Done: true,
RewriteToken: "",
- Resource: newObjectResponse(obj),
+ Resource: newObjectResponse(obj, externalURL),
}
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
index 165d9d7ec2ed4..4283ccf030dc0 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
@@ -5,30 +5,52 @@
package fakestorage
import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
"context"
"crypto/tls"
+ "errors"
"fmt"
+ "io"
+ "mime"
+ "mime/multipart"
"net"
"net/http"
"net/http/httptest"
+ "net/http/httputil"
+ "net/textproto"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
"sync"
"cloud.google.com/go/storage"
"github.com/fsouza/fake-gcs-server/internal/backend"
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
+ "github.com/fsouza/fake-gcs-server/internal/notification"
+ "github.com/gorilla/handlers"
"github.com/gorilla/mux"
+ "golang.org/x/oauth2/google"
"google.golang.org/api/option"
)
+const defaultPublicHost = "storage.googleapis.com"
+
// Server is the fake server.
//
// It provides a fake implementation of the Google Cloud Storage API.
type Server struct {
- backend backend.Storage
- uploads map[string]Object
- transport http.RoundTripper
- ts *httptest.Server
- mux *mux.Router
- mtx sync.RWMutex
+ backend backend.Storage
+ uploads sync.Map
+ transport http.RoundTripper
+ ts *httptest.Server
+ handler http.Handler
+ options Options
+ externalURL string
+ publicHost string
+ eventManager notification.EventManager
}
// NewServer creates a new instance of the server, pre-loaded with the given
@@ -41,6 +63,8 @@ func NewServer(objects []Object) *Server {
}
// NewServerWithHostPort creates a new server that listens on a custom host and port
+//
+// Deprecated: use NewServerWithOptions.
func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, error) {
return NewServerWithOptions(Options{
InitialObjects: objects,
@@ -49,30 +73,106 @@ func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server,
})
}
-// Options are used to configure the server on creation
+type EventManagerOptions = notification.EventManagerOptions
+
+type EventNotificationOptions = notification.EventNotificationOptions
+
+// Options are used to configure the server on creation.
type Options struct {
InitialObjects []Object
StorageRoot string
+ Seed string
+ Scheme string
Host string
Port uint16
// when set to true, the server will not actually start a TCP listener,
// client requests will get processed by an internal mocked transport.
NoListener bool
+
+ // Optional external URL, such as https://gcs.127.0.0.1.nip.io:4443
+ // Returned in the Location header for resumable uploads
+ // The "real" value is https://www.googleapis.com, the JSON API
+ // The default is whatever the server is bound to, such as https://0.0.0.0:4443
+ ExternalURL string
+
+ // Optional URL for public access
+ // An example is "storage.gcs.127.0.0.1.nip.io:4443", which will configure
+ // the server to serve objects at:
+ // https://storage.gcs.127.0.0.1.nip.io:4443//
+ // https://.storage.gcs.127.0.0.1.nip.io:4443>/
+ // If unset, the default is "storage.googleapis.com", the XML API
+ PublicHost string
+
+ // Optional list of headers to add to the CORS header allowlist
+ // An example is "X-Goog-Meta-Uploader", which will allow a
+ // custom metadata header named "X-Goog-Meta-Uploader" to be
+ // sent through the browser
+ AllowedCORSHeaders []string
+
+ // Destination for writing log.
+ Writer io.Writer
+
+ // EventOptions contains the events that should be published and the URL
+ // of the Google cloud function such events should be published to.
+ EventOptions EventManagerOptions
+
+ // Location used for buckets in the server.
+ BucketsLocation string
+
+ CertificateLocation string
+
+ PrivateKeyLocation string
}
-// NewServerWithOptions creates a new server with custom options
+// NewServerWithOptions creates a new server configured according to the
+// provided options.
func NewServerWithOptions(options Options) (*Server, error) {
- s, err := newServer(options.InitialObjects, options.StorageRoot)
+ s, err := newServer(options)
+ if err != nil {
+ return nil, err
+ }
+
+ allowedHeaders := []string{"Content-Type", "Content-Encoding", "Range", "Content-Range"}
+ allowedHeaders = append(allowedHeaders, options.AllowedCORSHeaders...)
+
+ cors := handlers.CORS(
+ handlers.AllowedMethods([]string{
+ http.MethodHead,
+ http.MethodGet,
+ http.MethodPost,
+ http.MethodPut,
+ http.MethodPatch,
+ http.MethodDelete,
+ }),
+ handlers.AllowedHeaders(allowedHeaders),
+ handlers.AllowedOrigins([]string{"*"}),
+ handlers.AllowCredentials(),
+ handlers.ExposedHeaders([]string{"Location"}),
+ )
+
+ s.handler = cors(s.handler)
+ if options.Writer != nil {
+ s.handler = handlers.LoggingHandler(options.Writer, s.handler)
+ }
+ s.handler = requestCompressHandler(s.handler)
+ s.transport = &muxTransport{handler: s.handler}
+
+ s.eventManager, err = notification.NewPubsubEventManager(options.EventOptions, options.Writer)
if err != nil {
return nil, err
}
+
if options.NoListener {
- s.setTransportToMux()
return s, nil
}
- s.ts = httptest.NewUnstartedServer(s.mux)
+ s.ts = httptest.NewUnstartedServer(s.handler)
+ startFunc := s.ts.StartTLS
+ if options.Scheme == "http" {
+ startFunc = s.ts.Start
+ }
+
if options.Port != 0 {
addr := fmt.Sprintf("%s:%d", options.Host, options.Port)
l, err := net.Listen("tcp", addr)
@@ -81,64 +181,255 @@ func NewServerWithOptions(options Options) (*Server, error) {
}
s.ts.Listener.Close()
s.ts.Listener = l
- s.ts.StartTLS()
- } else {
- s.ts.StartTLS()
}
- s.setTransportToAddr(s.ts.Listener.Addr().String())
+ if options.CertificateLocation != "" && options.PrivateKeyLocation != "" {
+ cert, err := tls.LoadX509KeyPair(options.CertificateLocation, options.PrivateKeyLocation)
+ if err != nil {
+ return nil, err
+ }
+ s.ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
+ }
+ startFunc()
+
return s, nil
}
-func newServer(objects []Object, storageRoot string) (*Server, error) {
- backendObjects := toBackendObjects(objects)
+func newServer(options Options) (*Server, error) {
+ if len(options.InitialObjects) > 0 && options.Seed != "" {
+ return nil, errors.New("please provide either a seed directory or a list of initial objects")
+ }
+
+ var backendObjects []backend.StreamingObject
+ if len(options.InitialObjects) > 0 {
+ backendObjects = bufferedObjectsToBackendObjects(options.InitialObjects)
+ }
+
var backendStorage backend.Storage
var err error
- if storageRoot != "" {
- backendStorage, err = backend.NewStorageFS(backendObjects, storageRoot)
+ if options.StorageRoot != "" {
+ backendStorage, err = backend.NewStorageFS(backendObjects, options.StorageRoot)
} else {
- backendStorage = backend.NewStorageMemory(backendObjects)
+ backendStorage, err = backend.NewStorageMemory(backendObjects)
}
if err != nil {
return nil, err
}
+ publicHost := options.PublicHost
+ if publicHost == "" {
+ publicHost = defaultPublicHost
+ }
+
s := Server{
- backend: backendStorage,
- uploads: make(map[string]Object),
+ backend: backendStorage,
+ uploads: sync.Map{},
+ externalURL: options.ExternalURL,
+ publicHost: publicHost,
+ options: options,
+ eventManager: ¬ification.PubsubEventManager{},
}
s.buildMuxer()
+ _, err = s.seed()
+ if err != nil {
+ return nil, err
+ }
return &s, nil
}
-func (s *Server) setTransportToAddr(addr string) {
- // #nosec
- tlsConfig := tls.Config{InsecureSkipVerify: true}
- s.transport = &http.Transport{
- TLSClientConfig: &tlsConfig,
- DialTLS: func(string, string) (net.Conn, error) {
- return tls.Dial("tcp", addr, &tlsConfig)
- },
+func unescapeMuxVars(vars map[string]string) map[string]string {
+ m := make(map[string]string)
+ for k, v := range vars {
+ r, err := url.PathUnescape(v)
+ if err == nil {
+ m[k] = r
+ } else {
+ m[k] = v
+ }
}
+ return m
}
-func (s *Server) setTransportToMux() {
- s.transport = &muxTransport{router: s.mux}
+func (s *Server) buildMuxer() {
+ const apiPrefix = "/storage/v1"
+ handler := mux.NewRouter().SkipClean(true).UseEncodedPath()
+
+ // healthcheck
+ handler.Path("/_internal/healthcheck").Methods(http.MethodGet).HandlerFunc(s.healthcheck)
+
+ routers := []*mux.Router{
+ handler.PathPrefix(apiPrefix).Subrouter(),
+ handler.MatcherFunc(s.publicHostMatcher).PathPrefix(apiPrefix).Subrouter(),
+ }
+
+ for _, r := range routers {
+ r.Path("/b").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets))
+ r.Path("/b/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets))
+ r.Path("/b").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost))
+ r.Path("/b/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost))
+ r.Path("/b/{bucketName}").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.getBucket))
+ r.Path("/b/{bucketName}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.updateBucket))
+ r.Path("/b/{bucketName}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteBucket))
+ r.Path("/b/{bucketName}/o").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects))
+ r.Path("/b/{bucketName}/o/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.patchObject))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.setObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl/{entity}").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.setObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject)
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject))
+ r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/{copyType:rewriteTo|copyTo}/b/{destinationBucket}/o/{destinationObject:.+}").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.rewriteObject))
+ r.Path("/b/{bucketName}/o/{destinationObject:.+}/compose").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.composeObject))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.updateObject))
+ }
+
+ // Internal / update server configuration
+ handler.Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig))
+ handler.MatcherFunc(s.publicHostMatcher).Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig))
+ handler.Path("/_internal/reseed").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.reseedServer))
+ // Internal - end
+
+ // XML API
+ xmlApiRouters := []*mux.Router{
+ handler.Host(fmt.Sprintf("{bucketName}.%s", s.publicHost)).Subrouter(),
+ handler.MatcherFunc(s.publicHostMatcher).PathPrefix(`/{bucketName}`).Subrouter(),
+ }
+ for _, r := range xmlApiRouters {
+ r.Path("/").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects))
+ r.Path("").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects))
+ }
+
+ bucketHost := fmt.Sprintf("{bucketName}.%s", s.publicHost)
+ handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Path("/download/storage/v1/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+ handler.Path("/upload/resumable/{uploadId}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+
+ // Batch endpoint
+ handler.MatcherFunc(s.publicHostMatcher).Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall)
+ handler.Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall)
+
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+
+ // Form Uploads
+ handler.Host(s.publicHost).Path("/{bucketName}").MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject))
+ handler.Host(bucketHost).MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject))
+
+ // Signed URLs (upload and download)
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject)
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject))
+ handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+
+ s.handler = handler
}
-func (s *Server) buildMuxer() {
- s.mux = mux.NewRouter()
- s.mux.Host("storage.googleapis.com").Path("/{bucketName}/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject)
- s.mux.Host("{bucketName}.storage.googleapis.com").Path("/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject)
- r := s.mux.PathPrefix("/storage/v1").Subrouter()
- r.Path("/b").Methods("GET").HandlerFunc(s.listBuckets)
- r.Path("/b/{bucketName}").Methods("GET").HandlerFunc(s.getBucket)
- r.Path("/b/{bucketName}/o").Methods("GET").HandlerFunc(s.listObjects)
- r.Path("/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject)
- r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("GET").HandlerFunc(s.getObject)
- r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("DELETE").HandlerFunc(s.deleteObject)
- r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/rewriteTo/b/{destinationBucket}/o/{destinationObject:.+}").HandlerFunc(s.rewriteObject)
- s.mux.Path("/download/storage/v1/b/{bucketName}/o/{objectName}").Methods("GET").HandlerFunc(s.downloadObject)
- s.mux.Path("/upload/storage/v1/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject)
- s.mux.Path("/upload/resumable/{uploadId}").Methods("PUT", "POST").HandlerFunc(s.uploadFileContent)
+func (s *Server) seed() ([]backend.StreamingObject, error) {
+ if s.options.Seed == "" {
+ return nil, nil
+ }
+
+ initialObjects, emptyBuckets := generateObjectsFromFiles(s.options.Seed)
+
+ backendObjects := bufferedObjectsToBackendObjects(initialObjects)
+
+ var err error
+ if s.options.StorageRoot != "" {
+ s.backend, err = backend.NewStorageFS(backendObjects, s.options.StorageRoot)
+ } else {
+ s.backend, err = backend.NewStorageMemory(backendObjects)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for _, bucketName := range emptyBuckets {
+ s.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
+ }
+ return backendObjects, nil
+}
+
+func (s *Server) reseedServer(r *http.Request) jsonResponse {
+ backendObjects, err := s.seed()
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+
+ return jsonResponse{data: fromBackendObjects(backendObjects)}
+}
+
+func generateObjectsFromFiles(folder string) ([]Object, []string) {
+ var objects []Object
+ var emptyBuckets []string
+ if files, err := os.ReadDir(folder); err == nil {
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ bucketName := f.Name()
+ localBucketPath := filepath.Join(folder, bucketName)
+
+ bucketObjects, err := objectsFromBucket(localBucketPath, bucketName)
+ if err != nil {
+ continue
+ }
+
+ if len(bucketObjects) < 1 {
+ emptyBuckets = append(emptyBuckets, bucketName)
+ }
+ objects = append(objects, bucketObjects...)
+ }
+ }
+ return objects, emptyBuckets
+}
+
+func objectsFromBucket(localBucketPath, bucketName string) ([]Object, error) {
+ var objects []Object
+ err := filepath.Walk(localBucketPath, func(path string, info os.FileInfo, _ error) error {
+ if info.Mode().IsRegular() {
+ // Rel() should never return error since path always descend from localBucketPath
+ relPath, _ := filepath.Rel(localBucketPath, path)
+ objectKey := filepath.ToSlash(relPath)
+ fileContent, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("could not read file %q: %w", path, err)
+ }
+ objects = append(objects, Object{
+ ObjectAttrs: ObjectAttrs{
+ ACL: []storage.ACLRule{
+ {
+ Entity: "projectOwner-test-project",
+ Role: "OWNER",
+ },
+ },
+ BucketName: bucketName,
+ Name: objectKey,
+ ContentType: mime.TypeByExtension(filepath.Ext(path)),
+ Crc32c: checksum.EncodedCrc32cChecksum(fileContent),
+ Md5Hash: checksum.EncodedMd5Hash(fileContent),
+ },
+ Content: fileContent,
+ })
+ }
+ return nil
+ })
+ return objects, err
+}
+
+func (s *Server) healthcheck(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+}
+
+// publicHostMatcher matches incoming requests against the currently specified server publicHost.
+func (s *Server) publicHostMatcher(r *http.Request, rm *mux.RouteMatch) bool {
+ if strings.Contains(s.publicHost, ":") || !strings.Contains(r.Host, ":") {
+ return r.Host == s.publicHost
+ }
+ idx := strings.IndexByte(r.Host, ':')
+ return r.Host[:idx] == s.publicHost
}
// Stop stops the server, closing all connections.
@@ -153,20 +444,136 @@ func (s *Server) Stop() {
// URL returns the server URL.
func (s *Server) URL() string {
+ if s.externalURL != "" {
+ return s.externalURL
+ }
if s.ts != nil {
return s.ts.URL
}
return ""
}
+// PublicURL returns the server's public download URL.
+func (s *Server) PublicURL() string {
+ return fmt.Sprintf("%s://%s", s.scheme(), s.publicHost)
+}
+
+func (s *Server) Backend() backend.Storage {
+ return s.backend
+}
+
+func (s *Server) scheme() string {
+ if s.options.Scheme == "http" {
+ return "http"
+ }
+ return "https"
+}
+
// HTTPClient returns an HTTP client configured to talk to the server.
func (s *Server) HTTPClient() *http.Client {
return &http.Client{Transport: s.transport}
}
+// HTTPHandler returns an HTTP handler that behaves like GCS.
+func (s *Server) HTTPHandler() http.Handler {
+ return s.handler
+}
+
// Client returns a GCS client configured to talk to the server.
func (s *Server) Client() *storage.Client {
- opt := option.WithHTTPClient(s.HTTPClient())
- client, _ := storage.NewClient(context.Background(), opt)
+ client, err := storage.NewClient(context.Background(), option.WithHTTPClient(s.HTTPClient()), option.WithCredentials(&google.Credentials{}))
+ if err != nil {
+ panic(err)
+ }
return client
}
+
+func (s *Server) handleBatchCall(w http.ResponseWriter, r *http.Request) {
+ reader, err := r.MultipartReader()
+ if err != nil {
+ http.Error(w, "invalid Content-Type header", http.StatusBadRequest)
+ return
+ }
+
+ var b bytes.Buffer
+ mw := multipart.NewWriter(&b)
+ defer mw.Close()
+ w.Header().Set("Content-Type", "multipart/mixed; boundary="+mw.Boundary())
+
+ w.WriteHeader(http.StatusOK)
+ part, err := reader.NextPart()
+ for ; err == nil; part, err = reader.NextPart() {
+ contentID := part.Header.Get("Content-ID")
+ if contentID == "" {
+ // missing content ID, skip
+ continue
+ }
+
+ partHeaders := textproto.MIMEHeader{}
+ partHeaders.Set("Content-Type", "application/http")
+ partHeaders.Set("Content-ID", strings.Replace(contentID, "<", "@,;:\"/[]?=
+// (including space). gsutil likes to use `=` in the boundary, but incorrectly
+// quotes it using single quotes.
+//
+// We do exclude \ and " from the regexp because those are not supported by the
+// mime package.
+//
+// This has been reported to gsutil
+// (https://github.com/GoogleCloudPlatform/gsutil/issues/1466). If that issue
+// ever gets closed, we should be able to get rid of this hack.
+var gsutilBoundary = regexp.MustCompile(`boundary='([^']*[()<>@,;:"/\[\]?= ]+[^']*)'`)
+
type multipartMetadata struct {
- Name string `json:"name"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding"`
+ ContentDisposition string `json:"contentDisposition"`
+ CacheControl string `json:"cacheControl"`
+ CustomTime time.Time `json:"customTime,omitempty"`
+ Name string `json:"name"`
+ Metadata map[string]string `json:"metadata"`
+}
+
+type contentRange struct {
+ KnownRange bool // Is the range known, or "*"?
+ KnownTotal bool // Is the total known, or "*"?
+ Start int // Start of the range, -1 if unknown
+ End int // End of the range, -1 if unknown
+ Total int // Total bytes expected, -1 if unknown
}
-func (s *Server) insertObject(w http.ResponseWriter, r *http.Request) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- bucketName := mux.Vars(r)["bucketName"]
- if err := s.backend.GetBucket(bucketName); err != nil {
- w.WriteHeader(http.StatusNotFound)
- err := newErrorResponse(http.StatusNotFound, "Not found", nil)
- json.NewEncoder(w).Encode(err)
- return
+type generationCondition struct {
+ ifGenerationMatch *int64
+ ifGenerationNotMatch *int64
+}
+
+func (c generationCondition) ConditionsMet(activeGeneration int64) bool {
+ if c.ifGenerationMatch != nil && *c.ifGenerationMatch != activeGeneration {
+ return false
+ }
+ if c.ifGenerationNotMatch != nil && *c.ifGenerationNotMatch == activeGeneration {
+ return false
+ }
+ return true
+}
+
+func (s *Server) insertObject(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ if _, err := s.backend.GetBucket(bucketName); err != nil {
+ return jsonResponse{status: http.StatusNotFound}
}
uploadType := r.URL.Query().Get("uploadType")
+ if uploadType == "" && r.Header.Get("X-Goog-Upload-Protocol") == uploadTypeResumable {
+ uploadType = uploadTypeResumable
+ }
+
switch uploadType {
- case "media":
- s.simpleUpload(bucketName, w, r)
- case "multipart":
- s.multipartUpload(bucketName, w, r)
- case "resumable":
- s.resumableUpload(bucketName, w, r)
+ case uploadTypeMedia:
+ return s.simpleUpload(bucketName, r)
+ case uploadTypeMultipart:
+ return s.multipartUpload(bucketName, r)
+ case uploadTypeResumable:
+ return s.resumableUpload(bucketName, r)
default:
- http.Error(w, "invalid uploadType", http.StatusBadRequest)
+ // Support Signed URL Uploads
+ if r.URL.Query().Get("X-Goog-Algorithm") != "" {
+ switch r.Method {
+ case http.MethodPost:
+ return s.resumableUpload(bucketName, r)
+ case http.MethodPut:
+ return s.signedUpload(bucketName, r)
+ }
+ }
+ return jsonResponse{errorMessage: "invalid uploadType", status: http.StatusBadRequest}
}
}
-func (s *Server) simpleUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
- defer r.Body.Close()
- name := r.URL.Query().Get("name")
+func (s *Server) insertFormObject(r *http.Request) xmlResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ if err := r.ParseMultipartForm(32 << 20); nil != err {
+ return xmlResponse{errorMessage: "invalid form", status: http.StatusBadRequest}
+ }
+
+ // Load metadata
+ var name string
+ if keys, ok := r.MultipartForm.Value["key"]; ok {
+ name = keys[0]
+ }
if name == "" {
- http.Error(w, "name is required for simple uploads", http.StatusBadRequest)
- return
+ return xmlResponse{errorMessage: "missing key", status: http.StatusBadRequest}
+ }
+ var predefinedACL string
+ if acls, ok := r.MultipartForm.Value["acl"]; ok {
+ predefinedACL = acls[0]
+ }
+ var contentEncoding string
+ if contentEncodings, ok := r.MultipartForm.Value["Content-Encoding"]; ok {
+ contentEncoding = contentEncodings[0]
+ }
+ var contentType string
+ if contentTypes, ok := r.MultipartForm.Value["Content-Type"]; ok {
+ contentType = contentTypes[0]
+ }
+ successActionStatus := http.StatusNoContent
+ if successActionStatuses, ok := r.MultipartForm.Value["success_action_status"]; ok {
+ successInt, err := strconv.Atoi(successActionStatuses[0])
+ if err != nil {
+ return xmlResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ if successInt != http.StatusOK && successInt != http.StatusCreated && successInt != http.StatusNoContent {
+ return xmlResponse{errorMessage: "invalid success action status", status: http.StatusBadRequest}
+ }
+ successActionStatus = successInt
+ }
+ metaData := make(map[string]string)
+ for key := range r.MultipartForm.Value {
+ lowerKey := strings.ToLower(key)
+ if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey {
+ metaData[metaDataKey] = r.MultipartForm.Value[key][0]
+ }
+ }
+
+ // Load file
+ var file *multipart.FileHeader
+ if files, ok := r.MultipartForm.File["file"]; ok {
+ file = files[0]
+ }
+ if file == nil {
+ return xmlResponse{errorMessage: "missing file", status: http.StatusBadRequest}
}
- data, err := ioutil.ReadAll(r.Body)
+ infile, err := file.Open()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return xmlResponse{errorMessage: err.Error()}
}
- obj := Object{BucketName: bucketName, Name: name, Content: data, Crc32c: encodedCrc32cChecksum(data), Md5Hash: encodedMd5Hash(data)}
- err = s.createObject(obj)
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: contentType,
+ ContentEncoding: contentEncoding,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metaData,
+ },
+ Content: infile,
+ }
+ obj, err = s.createObject(obj, backend.NoConditions{})
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return xmlResponse{errorMessage: err.Error()}
}
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
+ defer obj.Close()
+
+ if successActionStatus == 201 {
+ objectURI := fmt.Sprintf("%s/%s%s", s.URL(), bucketName, name)
+ xmlBody := createXmlResponseBody(bucketName, obj.Etag, strings.TrimPrefix(name, "/"), objectURI)
+ return xmlResponse{status: successActionStatus, data: xmlBody}
+ }
+ return xmlResponse{status: successActionStatus}
}
-var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+func (s *Server) wrapUploadPreconditions(r *http.Request, bucketName string, objectName string) (generationCondition, error) {
+ result := generationCondition{
+ ifGenerationMatch: nil,
+ ifGenerationNotMatch: nil,
+ }
+ ifGenerationMatch := r.URL.Query().Get("ifGenerationMatch")
+
+ if ifGenerationMatch != "" {
+ gen, err := strconv.ParseInt(ifGenerationMatch, 10, 64)
+ if err != nil {
+ return generationCondition{}, err
+ }
+ result.ifGenerationMatch = &gen
+ }
+
+ ifGenerationNotMatch := r.URL.Query().Get("ifGenerationNotMatch")
-func crc32cChecksum(content []byte) []byte {
- checksummer := crc32.New(crc32cTable)
- checksummer.Write(content)
- return checksummer.Sum(make([]byte, 0, 4))
+ if ifGenerationNotMatch != "" {
+ gen, err := strconv.ParseInt(ifGenerationNotMatch, 10, 64)
+ if err != nil {
+ return generationCondition{}, err
+ }
+ result.ifGenerationNotMatch = &gen
+ }
+
+ return result, nil
}
-func encodedChecksum(checksum []byte) string {
- return base64.StdEncoding.EncodeToString(checksum)
+func (s *Server) simpleUpload(bucketName string, r *http.Request) jsonResponse {
+ defer r.Body.Close()
+ name := r.URL.Query().Get("name")
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ customTime := r.URL.Query().Get("customTime")
+ if name == "" {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "name is required for simple uploads",
+ }
+ }
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: r.Header.Get(contentTypeHeader),
+ CacheControl: r.Header.Get(cacheControlHeader),
+ ContentEncoding: contentEncoding,
+ CustomTime: convertTimeWithoutError(customTime),
+ ACL: getObjectACL(predefinedACL),
+ },
+ Content: notImplementedSeeker{r.Body},
+ }
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func encodedCrc32cChecksum(content []byte) string {
- return encodedChecksum(crc32cChecksum(content))
+type notImplementedSeeker struct {
+ io.ReadCloser
}
-func md5Hash(b []byte) []byte {
- /* #nosec G401 */
- h := md5.New()
- h.Write(b)
- return h.Sum(nil)
+func (s notImplementedSeeker) Seek(offset int64, whence int) (int64, error) {
+ return 0, errors.New("not implemented")
}
-func encodedHash(hash []byte) string {
- return base64.StdEncoding.EncodeToString(hash)
+func (s *Server) signedUpload(bucketName string, r *http.Request) jsonResponse {
+ defer r.Body.Close()
+ name := unescapeMuxVars(mux.Vars(r))["objectName"]
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ customTime := r.URL.Query().Get("customTime")
+
+ // Load data from HTTP Headers
+ if contentEncoding == "" {
+ contentEncoding = r.Header.Get("Content-Encoding")
+ }
+
+ metaData := make(map[string]string)
+ for key := range r.Header {
+ lowerKey := strings.ToLower(key)
+ if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey {
+ metaData[metaDataKey] = r.Header.Get(key)
+ }
+ }
+
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: r.Header.Get(contentTypeHeader),
+ ContentEncoding: contentEncoding,
+ CustomTime: convertTimeWithoutError(customTime),
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metaData,
+ },
+ Content: notImplementedSeeker{r.Body},
+ }
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func encodedMd5Hash(content []byte) string {
- return encodedHash(md5Hash(content))
+func getObjectACL(predefinedACL string) []storage.ACLRule {
+ if predefinedACL == "publicRead" {
+ return []storage.ACLRule{
+ {
+ Entity: "allUsers",
+ Role: "READER",
+ },
+ }
+ }
+
+ return []storage.ACLRule{
+ {
+ Entity: "projectOwner-test-project",
+ Role: "OWNER",
+ },
+ }
}
-func (s *Server) multipartUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
+func (s *Server) multipartUpload(bucketName string, r *http.Request) jsonResponse {
defer r.Body.Close()
- _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ params, err := parseContentTypeParams(r.Header.Get(contentTypeHeader))
if err != nil {
- http.Error(w, "invalid Content-Type header", http.StatusBadRequest)
- return
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "invalid Content-Type header",
+ }
}
var (
metadata *multipartMetadata
content []byte
)
+ var contentType string
reader := multipart.NewReader(r.Body, params["boundary"])
+
+ var partReaders []io.Reader
+
part, err := reader.NextPart()
for ; err == nil; part, err = reader.NextPart() {
if metadata == nil {
metadata, err = loadMetadata(part)
+ contentType = metadata.ContentType
} else {
+ contentType = part.Header.Get(contentTypeHeader)
content, err = loadContent(part)
+ partReaders = append(partReaders, bytes.NewReader(content))
}
if err != nil {
break
}
}
if err != io.EOF {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
+ }
+
+ objName := r.URL.Query().Get("name")
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ if objName == "" {
+ objName = metadata.Name
}
- obj := Object{BucketName: bucketName, Name: metadata.Name, Content: content, Crc32c: encodedCrc32cChecksum(content), Md5Hash: encodedMd5Hash(content)}
- err = s.createObject(obj)
+
+ conditions, err := s.wrapUploadPreconditions(r, bucketName, objName)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: err.Error(),
+ }
+ }
+
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: objName,
+ ContentType: contentType,
+ CacheControl: metadata.CacheControl,
+ ContentEncoding: metadata.ContentEncoding,
+ ContentDisposition: metadata.ContentDisposition,
+ CustomTime: metadata.CustomTime,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metadata.Metadata,
+ },
+ Content: notImplementedSeeker{io.NopCloser(io.MultiReader(partReaders...))},
}
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
+
+ obj, err = s.createObject(obj, conditions)
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func (s *Server) resumableUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
- objName := r.URL.Query().Get("name")
- if objName == "" {
- metadata, err := loadMetadata(r.Body)
+func parseContentTypeParams(requestContentType string) (map[string]string, error) {
+ requestContentType = gsutilBoundary.ReplaceAllString(requestContentType, `boundary="$1"`)
+ _, params, err := mime.ParseMediaType(requestContentType)
+ return params, err
+}
+
+func (s *Server) resumableUpload(bucketName string, r *http.Request) jsonResponse {
+ if r.URL.Query().Has("upload_id") {
+ return s.uploadFileContent(r)
+ }
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ metadata := new(multipartMetadata)
+ if r.Body != http.NoBody {
+ var err error
+ metadata, err = loadMetadata(r.Body)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
}
+ }
+ objName := r.URL.Query().Get("name")
+ if objName == "" {
objName = metadata.Name
}
- obj := Object{BucketName: bucketName, Name: objName}
+ if contentEncoding == "" {
+ contentEncoding = metadata.ContentEncoding
+ }
+ obj := Object{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: objName,
+ ContentType: metadata.ContentType,
+ CacheControl: metadata.CacheControl,
+ ContentEncoding: contentEncoding,
+ CustomTime: metadata.CustomTime,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metadata.Metadata,
+ },
+ }
uploadID, err := generateUploadID()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ s.uploads.Store(uploadID, obj)
+ header := make(http.Header)
+ location := fmt.Sprintf(
+ "%s/upload/storage/v1/b/%s/o?uploadType=resumable&name=%s&upload_id=%s",
+ s.URL(),
+ bucketName,
+ url.PathEscape(objName),
+ uploadID,
+ )
+ header.Set("Location", location)
+ if r.Header.Get("X-Goog-Upload-Command") == "start" {
+ header.Set("X-Goog-Upload-URL", location)
+ header.Set("X-Goog-Upload-Status", "active")
+ }
+ return jsonResponse{
+ data: newObjectResponse(obj.ObjectAttrs, s.externalURL),
+ header: header,
}
- s.uploads[uploadID] = obj
- w.Header().Set("Location", s.URL()+"/upload/resumable/"+uploadID)
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
}
-func (s *Server) uploadFileContent(w http.ResponseWriter, r *http.Request) {
- uploadID := mux.Vars(r)["uploadId"]
- s.mtx.Lock()
- defer s.mtx.Unlock()
- obj, ok := s.uploads[uploadID]
+// uploadFileContent accepts a chunk of a resumable upload
+//
+// A resumable upload is sent in one or more chunks. The request's
+// "Content-Range" header is used to determine if more data is expected.
+//
+// When sending streaming content, the total size is unknown until the stream
+// is exhausted. The Go client always sends streaming content. The sequence of
+// "Content-Range" headers for 2600-byte content sent in 1000-byte chunks are:
+//
+// Content-Range: bytes 0-999/*
+// Content-Range: bytes 1000-1999/*
+// Content-Range: bytes 2000-2599/*
+// Content-Range: bytes */2600
+//
+// When sending chunked content of a known size, the total size is sent as
+// well. The Python client uses this method to upload files and in-memory
+// content. The sequence of "Content-Range" headers for the 2600-byte content
+// sent in 1000-byte chunks are:
+//
+// Content-Range: bytes 0-999/2600
+// Content-Range: bytes 1000-1999/2600
+// Content-Range: bytes 2000-2599/2600
+//
+// The server collects the content, analyzes the "Content-Range", and returns a
+// "308 Permanent Redirect" response if more chunks are expected, and a
+// "200 OK" response if the upload is complete (the Go client also accepts a
+// "201 Created" response). The "Range" header in the response should be set to
+// the size of the content received so far, such as:
+//
+// Range: bytes 0-2000
+//
+// The client (such as the Go client) can send a header "X-Guploader-No-308" if
+// it can't process a native "308 Permanent Redirect". The in-process response
+// then has a status of "200 OK", with a header "X-Http-Status-Code-Override"
+// set to "308".
+func (s *Server) uploadFileContent(r *http.Request) jsonResponse {
+ uploadID := r.URL.Query().Get("upload_id")
+ rawObj, ok := s.uploads.Load(uploadID)
if !ok {
- http.Error(w, "upload not found", http.StatusNotFound)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
+ obj := rawObj.(Object)
+ // TODO: stream upload file content to and from disk (when using the FS
+ // backend, at least) instead of loading the entire content into memory.
content, err := loadContent(r.Body)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
}
commit := true
- status := http.StatusCreated
- objLength := len(obj.Content)
+ status := http.StatusOK
obj.Content = append(obj.Content, content...)
- obj.Crc32c = encodedCrc32cChecksum(obj.Content)
- obj.Md5Hash = encodedMd5Hash(obj.Content)
+ obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content)
+ obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content)
+ obj.Etag = obj.Md5Hash
+ contentTypeHeader := r.Header.Get(contentTypeHeader)
+ if contentTypeHeader != "" {
+ obj.ContentType = contentTypeHeader
+ } else {
+ obj.ContentType = "application/octet-stream"
+ }
+ responseHeader := make(http.Header)
if contentRange := r.Header.Get("Content-Range"); contentRange != "" {
- commit, err = parseRange(contentRange, objLength, len(content), w)
+ parsed, err := parseContentRange(contentRange)
if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ if parsed.KnownRange {
+ // Middle of streaming request, or any part of chunked request
+ responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", parsed.End))
+ // Complete if the range covers the known total
+ commit = parsed.KnownTotal && (parsed.End+1 >= parsed.Total)
+ } else {
+ // End of a streaming request
+ responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", len(obj.Content)))
}
}
if commit {
- delete(s.uploads, uploadID)
- err = s.createObject(obj)
+ s.uploads.Delete(uploadID)
+ streamingObject, err := s.createObject(obj.StreamingObject(), backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer streamingObject.Close()
+ obj, err = streamingObject.BufferedObject()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return errToJsonResponse(err)
}
} else {
- status = http.StatusOK
- w.Header().Set("X-Http-Status-Code-Override", "308")
- s.uploads[uploadID] = obj
- }
- data, _ := json.Marshal(obj)
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Content-Length", strconv.Itoa(len(data)))
- w.WriteHeader(status)
- w.Write(data)
+ if _, no308 := r.Header["X-Guploader-No-308"]; no308 {
+ // Go client
+ responseHeader.Set("X-Http-Status-Code-Override", "308")
+ } else {
+ // Python client
+ status = http.StatusPermanentRedirect
+ }
+ s.uploads.Store(uploadID, obj)
+ }
+ if r.Header.Get("X-Goog-Upload-Command") == "upload, finalize" {
+ responseHeader.Set("X-Goog-Upload-Status", "final")
+ }
+ return jsonResponse{
+ status: status,
+ data: newObjectResponse(obj.ObjectAttrs, s.externalURL),
+ header: responseHeader,
+ }
}
-func parseRange(r string, objLength, bodyLength int, w http.ResponseWriter) (finished bool, err error) {
+// Parse a Content-Range header
+// Some possible valid header values:
+//
+// bytes 0-1023/4096 (first 1024 bytes of a 4096-byte document)
+// bytes 1024-2047/* (second 1024 bytes of a streaming document)
+// bytes */4096 (The end of 4096 byte streaming document)
+// bytes 0-*/* (start and end of a streaming document as sent by nodeJS client lib)
+// bytes */* (start and end of a streaming document as sent by the C++ SDK)
+func parseContentRange(r string) (parsed contentRange, err error) {
invalidErr := fmt.Errorf("invalid Content-Range: %v", r)
+
+ // Require that units == "bytes"
const bytesPrefix = "bytes "
- var contentLength int
if !strings.HasPrefix(r, bytesPrefix) {
- return false, invalidErr
+ return parsed, invalidErr
}
+
+ // Split range from total length
parts := strings.SplitN(r[len(bytesPrefix):], "/", 2)
if len(parts) != 2 {
- return false, invalidErr
+ return parsed, invalidErr
}
- var rangeStart, rangeEnd int
+ // Process range
if parts[0] == "*" {
- rangeStart = objLength
- rangeEnd = objLength + bodyLength
+ parsed.Start = -1
+ parsed.End = -1
} else {
rangeParts := strings.SplitN(parts[0], "-", 2)
if len(rangeParts) != 2 {
- return false, invalidErr
+ return parsed, invalidErr
}
- rangeStart, err = strconv.Atoi(rangeParts[0])
+
+ parsed.Start, err = strconv.Atoi(rangeParts[0])
if err != nil {
- return false, invalidErr
+ return parsed, invalidErr
}
- rangeEnd, err = strconv.Atoi(rangeParts[1])
- if err != nil {
- return false, invalidErr
+
+ if rangeParts[1] == "*" {
+ parsed.End = -1
+ } else {
+ parsed.KnownRange = true
+ parsed.End, err = strconv.Atoi(rangeParts[1])
+ if err != nil {
+ return parsed, invalidErr
+ }
}
}
- contentLength = objLength + bodyLength
- finished = rangeEnd == contentLength
- w.Header().Set("Range", fmt.Sprintf("bytes=%d-%d", rangeStart, rangeEnd))
+ // Process total length
+ if parts[1] == "*" {
+ parsed.Total = -1
+ } else {
+ parsed.KnownTotal = true
+ parsed.Total, err = strconv.Atoi(parts[1])
+ if err != nil {
+ return parsed, invalidErr
+ }
+ }
- return finished, nil
+ return parsed, nil
}
func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) {
@@ -255,7 +639,7 @@ func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) {
func loadContent(rc io.ReadCloser) ([]byte, error) {
defer rc.Close()
- return ioutil.ReadAll(rc)
+ return io.ReadAll(rc)
}
func generateUploadID() (string, error) {
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go
new file mode 100644
index 0000000000000..50d9661df84c8
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go
@@ -0,0 +1,123 @@
+package fakestorage
+
+import (
+ "encoding/xml"
+ "net/http"
+ "strings"
+)
+
+type xmlResponse struct {
+ status int
+ header http.Header
+ data any
+ errorMessage string
+}
+
+type xmlResponseBody struct {
+ XMLName xml.Name `xml:"PostResponse"`
+ Bucket string
+ Etag struct {
+ Value string `xml:",innerxml"`
+ }
+ Key string
+ Location string
+}
+
+type ListBucketResult struct {
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Name string `xml:"Name"`
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"`
+ Delimiter string `xml:"Delimiter"`
+ Prefix string `xml:"Prefix"`
+ KeyCount int `xml:"KeyCount"`
+ Contents []Contents `xml:"Contents"`
+}
+
+type Contents struct {
+ XMLName xml.Name `xml:"Contents"`
+ Key string `xml:"Key"`
+ Generation int64 `xml:"Generation"`
+ LastModified string `xml:"LastModified"`
+ ETag ETag
+ Size int64 `xml:"Size"`
+}
+
+type CommonPrefix struct {
+ Prefix string `xml:"Prefix"`
+}
+
+type ETag struct {
+ Value string `xml:",innerxml"`
+}
+
+func (e *ETag) Equals(etag string) bool {
+ trim := func(s string) string {
+ return strings.TrimPrefix(strings.TrimSuffix(s, "\""), "\"")
+ }
+ return trim(e.Value) == trim(etag)
+}
+
+type xmlHandler = func(r *http.Request) xmlResponse
+
+func xmlToHTTPHandler(h xmlHandler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ resp := h(r)
+ w.Header().Set("Content-Type", "application/xml")
+ for name, values := range resp.header {
+ for _, value := range values {
+ w.Header().Add(name, value)
+ }
+ }
+
+ status := resp.getStatus()
+ var data any
+ if status > 399 {
+ data = newErrorResponse(status, resp.getErrorMessage(status), nil)
+ } else {
+ data = resp.data
+ }
+
+ w.WriteHeader(status)
+
+ dataBytes, ok := data.([]byte)
+ if ok {
+ w.Write(dataBytes)
+ } else {
+ xml.NewEncoder(w).Encode(data)
+ }
+ }
+}
+
+func createXmlResponseBody(bucketName, etag, key, location string) []byte {
+ responseBody := xmlResponseBody{
+ Bucket: bucketName,
+ Etag: struct {
+ Value string `xml:",innerxml"`
+ }{etag},
+ Location: location,
+ Key: key,
+ }
+ x, err := xml.Marshal(responseBody)
+ if err != nil {
+ return nil
+ }
+
+ return []byte(xml.Header + string(x))
+}
+
+func (r *xmlResponse) getStatus() int {
+ if r.status > 0 {
+ return r.status
+ }
+ if r.errorMessage != "" {
+ return http.StatusInternalServerError
+ }
+ return http.StatusOK
+}
+
+func (r *xmlResponse) getErrorMessage(status int) string {
+ if r.errorMessage != "" {
+ return r.errorMessage
+ }
+ return http.StatusText(status)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go
new file mode 100644
index 0000000000000..e56a7aa7950a5
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import "time"
+
+// Bucket represents the bucket that is stored within the fake server.
+type Bucket struct {
+ Name string
+ VersioningEnabled bool
+ TimeCreated time.Time
+ DefaultEventBasedHold bool
+}
+
+const bucketMetadataSuffix = ".bucketMetadata"
+
+type BucketAttrs struct {
+ DefaultEventBasedHold bool
+ VersioningEnabled bool
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
index 24b1b2cb9437e..c96867d802df6 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
@@ -5,129 +5,465 @@
package backend
import (
+ "bytes"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
+ "io/fs"
"net/url"
"os"
- "path"
"path/filepath"
"strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
+ "github.com/pkg/xattr"
)
-// StorageFS is an implementation of the backend storage that stores data on disk
+// storageFS is an implementation of the backend storage that stores data on disk
+//
// The layout is the following:
+//
// - rootDir
-// |- bucket1
-// \- bucket2
-// |- object1
-// \- object2
+//
+// |- bucket1
+// \- bucket2
+// |- object1
+// \- object2
+//
// Bucket and object names are url path escaped, so there's no special meaning of forward slashes.
-type StorageFS struct {
+type storageFS struct {
rootDir string
+ mtx sync.RWMutex
+ mh metadataHandler
}
-// NewStorageFS creates an instance of StorageMemory
-func NewStorageFS(objects []Object, rootDir string) (Storage, error) {
+// NewStorageFS creates an instance of the filesystem-backed storage backend.
+func NewStorageFS(objects []StreamingObject, rootDir string) (Storage, error) {
if !strings.HasSuffix(rootDir, "/") {
rootDir += "/"
}
- s := &StorageFS{
- rootDir: rootDir,
+ err := os.MkdirAll(rootDir, 0o700)
+ if err != nil {
+ return nil, err
}
+
+ var mh metadataHandler = metadataFile{}
+ // Use xattr for metadata if rootDir supports it.
+ if xattr.XATTR_SUPPORTED {
+ xattrHandler := metadataXattr{}
+ var xerr *xattr.Error
+ _, err = xattrHandler.read(rootDir)
+ if err == nil || (errors.As(err, &xerr) && xerr.Err == xattr.ENOATTR) {
+ mh = xattrHandler
+ }
+ }
+
+ s := &storageFS{rootDir: rootDir, mh: mh}
for _, o := range objects {
- err := s.CreateObject(o)
+ obj, err := s.CreateObject(o, NoConditions{})
if err != nil {
return nil, err
}
+ obj.Close()
}
return s, nil
}
-// CreateBucket creates a bucket
-func (s *StorageFS) CreateBucket(name string) error {
- return os.MkdirAll(filepath.Join(s.rootDir, url.PathEscape(name)), 0700)
+// CreateBucket creates a bucket in the fs backend. A bucket is a folder in the
+// root directory.
+func (s *storageFS) CreateBucket(name string, bucketAttrs BucketAttrs) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ return s.createBucket(name, bucketAttrs)
+}
+
+func (s *storageFS) createBucket(name string, bucketAttrs BucketAttrs) error {
+ if bucketAttrs.VersioningEnabled {
+ return errors.New("not implemented: fs storage type does not support versioning yet")
+ }
+ path := filepath.Join(s.rootDir, url.PathEscape(name))
+ err := os.MkdirAll(path, 0o700)
+ if err != nil {
+ return err
+ }
+ encoded, err := json.Marshal(bucketAttrs)
+ if err != nil {
+ return err
+ }
+ return writeFile(path+bucketMetadataSuffix, encoded, 0o600)
}
-// ListBuckets lists buckets
-func (s *StorageFS) ListBuckets() ([]string, error) {
- infos, err := ioutil.ReadDir(s.rootDir)
+// ListBuckets returns a list of buckets from the list of directories in the
+// root directory.
+func (s *storageFS) ListBuckets() ([]Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ infos, err := os.ReadDir(s.rootDir)
if err != nil {
return nil, err
}
- buckets := []string{}
+ buckets := []Bucket{}
for _, info := range infos {
if info.IsDir() {
unescaped, err := url.PathUnescape(info.Name())
if err != nil {
- return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err)
+ return nil, fmt.Errorf("failed to unescape object name %s: %w", info.Name(), err)
+ }
+ fileInfo, err := info.Info()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get file info for %s: %w", info.Name(), err)
}
- buckets = append(buckets, unescaped)
+ buckets = append(buckets, Bucket{Name: unescaped, TimeCreated: timespecToTime(createTimeFromFileInfo(fileInfo))})
}
}
return buckets, nil
}
-// GetBucket checks if a bucket exists
-func (s *StorageFS) GetBucket(name string) error {
- _, err := os.Stat(filepath.Join(s.rootDir, url.PathEscape(name)))
- return err
+func timespecToTime(ts syscall.Timespec) time.Time {
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
-// CreateObject stores an object
-func (s *StorageFS) CreateObject(obj Object) error {
- err := s.CreateBucket(obj.BucketName)
+func (s *storageFS) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error {
+ if attrsToUpdate.VersioningEnabled {
+ return errors.New("not implemented: fs storage type does not support versioning yet")
+ }
+ encoded, err := json.Marshal(attrsToUpdate)
if err != nil {
return err
}
- encoded, err := json.Marshal(obj)
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName))
+ return writeFile(path+bucketMetadataSuffix, encoded, 0o600)
+}
+
+// GetBucket returns information about the given bucket, or an error if it
+// doesn't exist.
+func (s *storageFS) GetBucket(name string) (Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ path := filepath.Join(s.rootDir, url.PathEscape(name))
+ dirInfo, err := os.Stat(path)
if err != nil {
- return err
+ return Bucket{}, err
}
- return ioutil.WriteFile(filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), encoded, 0664)
+ attrs, err := getBucketAttributes(path)
+ if err != nil {
+ return Bucket{}, err
+ }
+ return Bucket{Name: name, VersioningEnabled: false, TimeCreated: timespecToTime(createTimeFromFileInfo(dirInfo)), DefaultEventBasedHold: attrs.DefaultEventBasedHold}, err
}
-// ListObjects lists the objects in a given bucket with a given prefix and delimeter
-func (s *StorageFS) ListObjects(bucketName string) ([]Object, error) {
- infos, err := ioutil.ReadDir(path.Join(s.rootDir, url.PathEscape(bucketName)))
+func getBucketAttributes(path string) (BucketAttrs, error) {
+ content, err := os.ReadFile(path + bucketMetadataSuffix)
if err != nil {
- return nil, err
+ if os.IsNotExist(err) {
+ return BucketAttrs{}, nil
+ }
+ return BucketAttrs{}, err
}
- objects := []Object{}
- for _, info := range infos {
- unescaped, err := url.PathUnescape(info.Name())
+ var attrs BucketAttrs
+ err = json.Unmarshal(content, &attrs)
+ if err != nil {
+ return BucketAttrs{}, err
+ }
+ return attrs, nil
+}
+
+// DeleteBucket removes the bucket from the backend.
+func (s *storageFS) DeleteBucket(name string) error {
+ objs, err := s.ListObjects(name, "", false)
+ if err != nil {
+ return BucketNotFound
+ }
+ if len(objs) > 0 {
+ return BucketNotEmpty
+ }
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ return os.RemoveAll(filepath.Join(s.rootDir, url.PathEscape(name)))
+}
+
+// CreateObject stores an object as a regular file on disk. The backing content
+// for the object may be in the same file that's being updated, so a temporary
+// file is first created and then moved into place. This also makes it so any
+// object content readers currently open continue reading from the original
+// file instead of the newly created file.
+//
+// The crc32c checksum and md5 hash of the object content is calculated when
+// reading the object content. Any checksum or hash in the passed-in object
+// metadata is overwritten.
+func (s *storageFS) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) {
+ if obj.Generation > 0 {
+ return StreamingObject{}, errors.New("not implemented: fs storage type does not support objects generation yet")
+ }
+
+ // Note: this was a quick fix for issue #701. Now that we have a way to
+ // persist object attributes, we should implement versioning in the
+ // filesystem backend and handle generations outside of the backends.
+ obj.Generation = time.Now().UnixNano() / 1000
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ err := s.createBucket(obj.BucketName, BucketAttrs{VersioningEnabled: false})
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ var activeGeneration int64
+ existingObj, err := s.getObject(obj.BucketName, obj.Name)
+ if err != nil {
+ activeGeneration = 0
+ } else {
+ activeGeneration = existingObj.Generation
+ }
+
+ if !conditions.ConditionsMet(activeGeneration) {
+ return StreamingObject{}, PreConditionFailed
+ }
+
+ path := filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), obj.Name)
+ if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
+ return StreamingObject{}, err
+ }
+
+ // Nothing to do if this operation only creates directories
+ if strings.HasSuffix(obj.Name, "/") {
+ // TODO: populate Crc32c, Md5Hash, and Etag
+ return StreamingObject{obj.ObjectAttrs, noopSeekCloser{bytes.NewReader([]byte{})}}, nil
+ }
+
+ var buf bytes.Buffer
+ hasher := checksum.NewStreamingHasher()
+ objectContent := io.TeeReader(obj.Content, hasher)
+
+ if _, err = io.Copy(&buf, objectContent); err != nil {
+ return StreamingObject{}, err
+ }
+
+ if obj.Crc32c == "" {
+ obj.Crc32c = hasher.EncodedCrc32cChecksum()
+ }
+ if obj.Md5Hash == "" {
+ obj.Md5Hash = hasher.EncodedMd5Hash()
+ }
+ if obj.Etag == "" {
+ obj.Etag = obj.Md5Hash
+ }
+
+ // TODO: Handle if metadata is not present more gracefully?
+ encoded, err := json.Marshal(obj.ObjectAttrs)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ if err := writeFile(path, buf.Bytes(), 0o600); err != nil {
+ return StreamingObject{}, err
+ }
+
+ if err = s.mh.write(path, encoded); err != nil {
+ return StreamingObject{}, err
+ }
+
+ err = openObjectAndSetSize(&obj, path)
+
+ return obj, err
+}
+
+// ListObjects lists the objects in a given bucket with a given prefix and
+// delimeter.
+func (s *storageFS) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ objects := []ObjectAttrs{}
+ bucketPath := filepath.Join(s.rootDir, url.PathEscape(bucketName))
+ if err := filepath.Walk(bucketPath, func(path string, info fs.FileInfo, err error) error {
if err != nil {
- return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err)
+ return err
+ }
+
+ objName, _ := filepath.Rel(bucketPath, path)
+ if s.mh.isSpecialFile(info.Name()) {
+ return nil
+ }
+ if info.IsDir() {
+ return nil
}
- object, err := s.GetObject(bucketName, unescaped)
+ if prefix != "" && !strings.HasPrefix(objName, prefix) {
+ return nil
+ }
+ objAttrs, err := s.getObjectAttrs(bucketName, objName)
if err != nil {
- return nil, err
+ return err
}
- objects = append(objects, object)
+ objects = append(objects, objAttrs)
+ return nil
+ }); err != nil {
+ return nil, err
}
return objects, nil
}
-// GetObject get an object by bucket and name
-func (s *StorageFS) GetObject(bucketName, objectName string) (Object, error) {
- encoded, err := ioutil.ReadFile(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName)))
+// GetObject get an object by bucket and name.
+func (s *storageFS) GetObject(bucketName, objectName string) (StreamingObject, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ return s.getObject(bucketName, objectName)
+}
+
+// GetObjectWithGeneration retrieves an specific version of the object. Not
+// implemented for this backend.
+func (s *storageFS) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
if err != nil {
- return Object{}, err
+ return obj, err
}
- var obj Object
- err = json.Unmarshal(encoded, &obj)
- if err != nil {
- return Object{}, err
+ if obj.Generation != generation {
+ return obj, fmt.Errorf("generation mismatch, object generation is %v, requested generation is %v (note: filesystem backend does not support versioning)", obj.Generation, generation)
}
- obj.Name = objectName
- obj.BucketName = bucketName
return obj, nil
}
-// DeleteObject deletes an object by bucket and name
-func (s *StorageFS) DeleteObject(bucketName, objectName string) error {
+func (s *storageFS) getObject(bucketName, objectName string) (StreamingObject, error) {
+ attrs, err := s.getObjectAttrs(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ obj := StreamingObject{ObjectAttrs: attrs}
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ err = openObjectAndSetSize(&obj, path)
+
+ return obj, err
+}
+
+func openObjectAndSetSize(obj *StreamingObject, path string) error {
+ info, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+
+ obj.Content = newLazyReader(path)
+ obj.Size = info.Size()
+
+ return nil
+}
+
+func (s *storageFS) getObjectAttrs(bucketName, objectName string) (ObjectAttrs, error) {
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ encoded, err := s.mh.read(path)
+ if err != nil {
+ return ObjectAttrs{}, err
+ }
+
+ var attrs ObjectAttrs
+ if err = json.Unmarshal(encoded, &attrs); err != nil {
+ return ObjectAttrs{}, err
+ }
+
+ info, err := os.Stat(path)
+ if err != nil {
+ return ObjectAttrs{}, fmt.Errorf("failed to stat: %w", err)
+ }
+
+ attrs.Name = filepath.ToSlash(objectName)
+ attrs.BucketName = bucketName
+ attrs.Size = info.Size()
+ return attrs, nil
+}
+
+// DeleteObject deletes an object by bucket and name.
+func (s *storageFS) DeleteObject(bucketName, objectName string) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
if objectName == "" {
- return fmt.Errorf("can't delete object with empty name")
+ return errors.New("can't delete object with empty name")
}
- return os.Remove(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName)))
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ if err := s.mh.remove(path); err != nil {
+ return err
+ }
+ return os.Remove(path)
+}
+
+func (s *storageFS) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+
+ obj.patch(attrsToUpdate)
+ obj.Generation = 0 // reset generation id
+ return s.CreateObject(obj, NoConditions{})
+}
+
+func (s *storageFS) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+
+ if attrsToUpdate.Metadata != nil {
+ obj.Metadata = map[string]string{}
+ }
+ obj.patch(attrsToUpdate)
+ obj.Generation = 0 // reset generation id
+ return s.CreateObject(obj, NoConditions{})
+}
+
+type concatenatedContent struct {
+ io.Reader
+}
+
+func (c concatenatedContent) Close() error {
+ return errors.New("not implemented")
+}
+
+func (c concatenatedContent) Seek(offset int64, whence int) (int64, error) {
+ return 0, errors.New("not implemented")
+}
+
+func concatObjectReaders(objects []StreamingObject) io.ReadSeekCloser {
+ readers := make([]io.Reader, len(objects))
+ for i := range objects {
+ readers[i] = objects[i].Content
+ }
+ return concatenatedContent{io.MultiReader(readers...)}
+}
+
+func (s *storageFS) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) {
+ var sourceObjects []StreamingObject
+ for _, n := range objectNames {
+ obj, err := s.GetObject(bucketName, n)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+ sourceObjects = append(sourceObjects, obj)
+ }
+
+ dest := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: destinationName,
+ ContentType: contentType,
+ Created: time.Now().String(),
+ },
+ }
+
+ dest.Content = concatObjectReaders(sourceObjects)
+ dest.Metadata = metadata
+
+ result, err := s.CreateObject(dest, NoConditions{})
+ if err != nil {
+ return result, err
+ }
+
+ return result, nil
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go
new file mode 100644
index 0000000000000..8c30a3149213c
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go
@@ -0,0 +1,53 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type lazyReader struct {
+ filename string
+ once *sync.Once
+ f *os.File
+ err error
+}
+
+func newLazyReader(filename string) io.ReadSeekCloser {
+ return &lazyReader{
+ filename: filename,
+ once: &sync.Once{},
+ }
+}
+
+func (r *lazyReader) open() {
+ r.f, r.err = os.Open(r.filename)
+}
+
+func (r *lazyReader) Read(p []byte) (int, error) {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return 0, r.err
+ }
+ return r.f.Read(p)
+}
+
+func (r *lazyReader) Seek(offset int64, whence int) (int64, error) {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return 0, r.err
+ }
+ return r.f.Seek(offset, whence)
+}
+
+func (r *lazyReader) Close() error {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return r.err
+ }
+ return r.f.Close()
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
index 257843ad36308..c32f06abf2cd4 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
@@ -7,118 +7,386 @@ package backend
import (
"errors"
"fmt"
+ "io"
+ "strings"
"sync"
+ "time"
+
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
)
-// StorageMemory is an implementation of the backend storage that stores data in memory
-type StorageMemory struct {
- buckets map[string][]Object
+const timestampFormat = "2006-01-02T15:04:05.999999Z07:00"
+
+// storageMemory is an implementation of the backend storage that stores data
+// in memory.
+type storageMemory struct {
+ buckets map[string]bucketInMemory
mtx sync.RWMutex
}
-// NewStorageMemory creates an instance of StorageMemory
-func NewStorageMemory(objects []Object) Storage {
- s := &StorageMemory{
- buckets: make(map[string][]Object),
+type bucketInMemory struct {
+ Bucket
+ // maybe we can refactor how the memory backend works? no need to store
+ // Object instances.
+ activeObjects []Object
+ archivedObjects []Object
+}
+
+func newBucketInMemory(name string, versioningEnabled bool, bucketAttrs BucketAttrs) bucketInMemory {
+ return bucketInMemory{Bucket{name, versioningEnabled, time.Now(), bucketAttrs.DefaultEventBasedHold}, []Object{}, []Object{}}
+}
+
+func (bm *bucketInMemory) addObject(obj Object) Object {
+ if obj.Crc32c == "" {
+ obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content)
+ }
+ if obj.Md5Hash == "" {
+ obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content)
+ }
+ if obj.Etag == "" {
+ obj.Etag = obj.Md5Hash
+ }
+ if obj.Size == 0 {
+ obj.Size = int64(len(obj.Content))
+ }
+ obj.Generation = getNewGenerationIfZero(obj.Generation)
+ index := findObject(obj, bm.activeObjects, false)
+ if index >= 0 {
+ if bm.VersioningEnabled {
+ bm.activeObjects[index].Deleted = time.Now().Format(timestampFormat)
+ bm.cpToArchive(bm.activeObjects[index])
+ }
+ bm.activeObjects[index] = obj
+ } else {
+ bm.activeObjects = append(bm.activeObjects, obj)
+ }
+
+ return obj
+}
+
+func getNewGenerationIfZero(generation int64) int64 {
+ if generation == 0 {
+ return time.Now().UnixNano() / 1000
+ }
+ return generation
+}
+
+func (bm *bucketInMemory) deleteObject(obj Object, matchGeneration bool) {
+ index := findObject(obj, bm.activeObjects, matchGeneration)
+ if index < 0 {
+ return
+ }
+ if bm.VersioningEnabled {
+ obj.Deleted = time.Now().Format(timestampFormat)
+ bm.mvToArchive(obj)
+ } else {
+ bm.deleteFromObjectList(obj, true)
+ }
+}
+
+func (bm *bucketInMemory) cpToArchive(obj Object) {
+ bm.archivedObjects = append(bm.archivedObjects, obj)
+}
+
+func (bm *bucketInMemory) mvToArchive(obj Object) {
+ bm.cpToArchive(obj)
+ bm.deleteFromObjectList(obj, true)
+}
+
+func (bm *bucketInMemory) deleteFromObjectList(obj Object, active bool) {
+ objects := bm.activeObjects
+ if !active {
+ objects = bm.archivedObjects
+ }
+ index := findObject(obj, objects, !active)
+ objects[index] = objects[len(objects)-1]
+ if active {
+ bm.activeObjects = objects[:len(objects)-1]
+ } else {
+ bm.archivedObjects = objects[:len(objects)-1]
+ }
+}
+
+// findObject looks for an object in the given list and return the index where it
+// was found, or -1 if the object doesn't exist.
+func findObject(obj Object, objectList []Object, matchGeneration bool) int {
+ for i, o := range objectList {
+ if matchGeneration && obj.ID() == o.ID() {
+ return i
+ }
+ if !matchGeneration && obj.IDNoGen() == o.IDNoGen() {
+ return i
+ }
+ }
+ return -1
+}
+
+// findLastObjectGeneration looks for an object in the given list and return the index where it
+// was found, or -1 if the object doesn't exist.
+func findLastObjectGeneration(obj Object, objectList []Object) int64 {
+ highScore := int64(0)
+ for _, o := range objectList {
+ if obj.IDNoGen() == o.IDNoGen() && o.Generation > highScore {
+ highScore = o.Generation
+ }
+ }
+ return highScore
+}
+
+// NewStorageMemory creates an instance of StorageMemory.
+func NewStorageMemory(objects []StreamingObject) (Storage, error) {
+ s := &storageMemory{
+ buckets: make(map[string]bucketInMemory),
}
for _, o := range objects {
- s.buckets[o.BucketName] = append(s.buckets[o.BucketName], o)
+ bufferedObject, err := o.BufferedObject()
+ if err != nil {
+ return nil, err
+ }
+ s.CreateBucket(o.BucketName, BucketAttrs{false, false})
+ bucket := s.buckets[o.BucketName]
+ bucket.addObject(bufferedObject)
+ s.buckets[o.BucketName] = bucket
}
- return s
+ return s, nil
}
-// CreateBucket creates a bucket
-func (s *StorageMemory) CreateBucket(name string) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- if _, ok := s.buckets[name]; !ok {
- s.buckets[name] = nil
+func (s *storageMemory) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error {
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return err
}
+ bucketInMemory.DefaultEventBasedHold = attrsToUpdate.DefaultEventBasedHold
+ bucketInMemory.VersioningEnabled = attrsToUpdate.VersioningEnabled
+ s.buckets[bucketName] = bucketInMemory
return nil
}
-// ListBuckets lists buckets
-func (s *StorageMemory) ListBuckets() ([]string, error) {
+// CreateBucket creates a bucket.
+func (s *storageMemory) CreateBucket(name string, bucketAttrs BucketAttrs) error {
s.mtx.Lock()
defer s.mtx.Unlock()
- buckets := []string{}
- for bucket := range s.buckets {
- buckets = append(buckets, bucket)
+ bucket, err := s.getBucketInMemory(name)
+ if err == nil {
+ if bucket.VersioningEnabled != bucketAttrs.VersioningEnabled {
+ return fmt.Errorf("a bucket named %s already exists, but with different properties", name)
+ }
+ return nil
+ }
+ s.buckets[name] = newBucketInMemory(name, bucketAttrs.VersioningEnabled, bucketAttrs)
+ return nil
+}
+
+// ListBuckets lists buckets currently registered in the backend.
+func (s *storageMemory) ListBuckets() ([]Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ buckets := []Bucket{}
+ for _, bucketInMemory := range s.buckets {
+ buckets = append(buckets, Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, false})
}
return buckets, nil
}
-// GetBucket checks if a bucket exists
-func (s *StorageMemory) GetBucket(name string) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
+// GetBucket retrieves the bucket information from the backend.
+func (s *storageMemory) GetBucket(name string) (Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ bucketInMemory, err := s.getBucketInMemory(name)
+ return Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, bucketInMemory.DefaultEventBasedHold}, err
+}
- if _, ok := s.buckets[name]; !ok {
- return fmt.Errorf("no bucket named %s", name)
+func (s *storageMemory) getBucketInMemory(name string) (bucketInMemory, error) {
+ if bucketInMemory, found := s.buckets[name]; found {
+ return bucketInMemory, nil
}
- return nil
+ return bucketInMemory{}, fmt.Errorf("no bucket named %s", name)
}
-// CreateObject stores an object
-func (s *StorageMemory) CreateObject(obj Object) error {
+// DeleteBucket removes the bucket from the backend.
+func (s *storageMemory) DeleteBucket(name string) error {
+ objs, err := s.ListObjects(name, "", false)
+ if err != nil {
+ return BucketNotFound
+ }
+ if len(objs) > 0 {
+ return BucketNotEmpty
+ }
+
s.mtx.Lock()
defer s.mtx.Unlock()
-
- index := s.findObject(obj)
- if index < 0 {
- s.buckets[obj.BucketName] = append(s.buckets[obj.BucketName], obj)
- } else {
- s.buckets[obj.BucketName][index] = obj
- }
+ delete(s.buckets, name)
return nil
}
-// findObject looks for an object in its bucket and return the index where it
-// was found, or -1 if the object doesn't exist.
-//
-// It doesn't lock the mutex, callers must lock the mutex before calling this
-// method.
-func (s *StorageMemory) findObject(obj Object) int {
- for i, o := range s.buckets[obj.BucketName] {
- if obj.ID() == o.ID() {
- return i
- }
+// CreateObject stores an object in the backend.
+func (s *storageMemory) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ bucketInMemory, err := s.getBucketInMemory(obj.BucketName)
+ if err != nil {
+ bucketInMemory = newBucketInMemory(obj.BucketName, false, BucketAttrs{})
}
- return -1
+ bufferedObj, err := obj.BufferedObject()
+ currentGeneration := findLastObjectGeneration(bufferedObj, bucketInMemory.activeObjects)
+ if !conditions.ConditionsMet(currentGeneration) {
+ return StreamingObject{}, PreConditionFailed
+ }
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ newObj := bucketInMemory.addObject(bufferedObj)
+ s.buckets[obj.BucketName] = bucketInMemory
+ return newObj.StreamingObject(), nil
}
-// ListObjects lists the objects in a given bucket with a given prefix and delimeter
-func (s *StorageMemory) ListObjects(bucketName string) ([]Object, error) {
+// ListObjects lists the objects in a given bucket with a given prefix and
+// delimiter.
+func (s *storageMemory) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
- objects, ok := s.buckets[bucketName]
- if !ok {
- return nil, errors.New("bucket not found")
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return []ObjectAttrs{}, err
+ }
+ objAttrs := make([]ObjectAttrs, 0, len(bucketInMemory.activeObjects))
+ for _, obj := range bucketInMemory.activeObjects {
+ if prefix != "" && !strings.HasPrefix(obj.Name, prefix) {
+ continue
+ }
+ objAttrs = append(objAttrs, obj.ObjectAttrs)
+ }
+ if !versions {
+ return objAttrs, nil
}
- return objects, nil
+
+ archvObjs := make([]ObjectAttrs, 0, len(bucketInMemory.archivedObjects))
+ for _, obj := range bucketInMemory.archivedObjects {
+ if prefix != "" && !strings.HasPrefix(obj.Name, prefix) {
+ continue
+ }
+ archvObjs = append(archvObjs, obj.ObjectAttrs)
+ }
+ return append(objAttrs, archvObjs...), nil
}
-// GetObject get an object by bucket and name
-func (s *StorageMemory) GetObject(bucketName, objectName string) (Object, error) {
- obj := Object{BucketName: bucketName, Name: objectName}
+func (s *storageMemory) GetObject(bucketName, objectName string) (StreamingObject, error) {
+ return s.GetObjectWithGeneration(bucketName, objectName, 0)
+}
+
+// GetObjectWithGeneration retrieves a specific version of the object.
+func (s *storageMemory) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
- index := s.findObject(obj)
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ matchGeneration := false
+ obj := Object{ObjectAttrs: ObjectAttrs{BucketName: bucketName, Name: objectName}}
+ listToConsider := bucketInMemory.activeObjects
+ if generation != 0 {
+ matchGeneration = true
+ obj.Generation = generation
+ listToConsider = append(listToConsider, bucketInMemory.archivedObjects...)
+ }
+ index := findObject(obj, listToConsider, matchGeneration)
if index < 0 {
- return obj, errors.New("object not found")
+ return obj.StreamingObject(), errors.New("object not found")
}
- return s.buckets[bucketName][index], nil
+
+ return listToConsider[index].StreamingObject(), nil
}
-// DeleteObject deletes an object by bucket and name
-func (s *StorageMemory) DeleteObject(bucketName, objectName string) error {
- obj := Object{BucketName: bucketName, Name: objectName}
- index := s.findObject(obj)
- if index < 0 {
- return fmt.Errorf("no such object in bucket %s: %s", bucketName, objectName)
+func (s *storageMemory) DeleteObject(bucketName, objectName string) error {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return err
+ }
+ bufferedObject, err := obj.BufferedObject()
+ if err != nil {
+ return err
}
- bucket := s.buckets[obj.BucketName]
- bucket[index] = bucket[len(bucket)-1]
- s.buckets[obj.BucketName] = bucket[:len(bucket)-1]
+ bucketInMemory.deleteObject(bufferedObject, true)
+ s.buckets[bucketName] = bucketInMemory
return nil
}
+
+func (s *storageMemory) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ obj.patch(attrsToUpdate)
+ s.CreateObject(obj, NoConditions{})
+ return obj, nil
+}
+
+// UpdateObject replaces an object metadata, custom time, and acl.
+func (s *storageMemory) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ if attrsToUpdate.Metadata != nil {
+ obj.Metadata = map[string]string{}
+ }
+ obj.patch(attrsToUpdate)
+ s.CreateObject(obj, NoConditions{})
+ return obj, nil
+}
+
+func (s *storageMemory) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) {
+ var data []byte
+ for _, n := range objectNames {
+ obj, err := s.GetObject(bucketName, n)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ objectContent, err := io.ReadAll(obj.Content)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ data = append(data, objectContent...)
+ }
+
+ var dest Object
+ streamingDest, err := s.GetObject(bucketName, destinationName)
+ if err != nil {
+ dest = Object{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: destinationName,
+ ContentType: contentType,
+ Created: time.Now().String(),
+ },
+ }
+ } else {
+ dest, err = streamingDest.BufferedObject()
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ }
+
+ dest.Content = data
+ dest.Crc32c = ""
+ dest.Md5Hash = ""
+ dest.Etag = ""
+ dest.Size = 0
+ dest.Metadata = metadata
+
+ result, err := s.CreateObject(dest.StreamingObject(), NoConditions{})
+ if err != nil {
+ return result, err
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go
new file mode 100644
index 0000000000000..6d9d2313d27dc
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go
@@ -0,0 +1,13 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+type metadataHandler interface {
+ write(path string, encoded []byte) error
+ read(path string) ([]byte, error)
+ remove(path string) error
+ isSpecialFile(path string) bool
+ rename(pathSrc, pathDst string) error
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go
new file mode 100644
index 0000000000000..94cce654a8c69
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go
@@ -0,0 +1,34 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "strings"
+)
+
+const metadataSuffix = ".metadata"
+
+type metadataFile struct{}
+
+func (m metadataFile) write(path string, encoded []byte) error {
+ return writeFile(path+metadataSuffix, encoded, 0o600)
+}
+
+func (m metadataFile) read(path string) ([]byte, error) {
+ return os.ReadFile(path + metadataSuffix)
+}
+
+func (m metadataFile) isSpecialFile(path string) bool {
+ return strings.HasSuffix(path, metadataSuffix)
+}
+
+func (m metadataFile) remove(path string) error {
+ return os.Remove(path + metadataSuffix)
+}
+
+func (m metadataFile) rename(pathSrc, pathDst string) error {
+ return os.Rename(pathSrc+metadataSuffix, pathDst+metadataSuffix)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go
new file mode 100644
index 0000000000000..9d40580120be6
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go
@@ -0,0 +1,33 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "github.com/pkg/xattr"
+)
+
+const xattrKey = "user.metadata"
+
+type metadataXattr struct{}
+
+func (m metadataXattr) write(path string, encoded []byte) error {
+ return xattr.Set(path, xattrKey, encoded)
+}
+
+func (m metadataXattr) read(path string) ([]byte, error) {
+ return xattr.Get(path, xattrKey)
+}
+
+func (m metadataXattr) isSpecialFile(path string) bool {
+ return false
+}
+
+func (m metadataXattr) remove(path string) error {
+ return nil
+}
+
+func (m metadataXattr) rename(pathSrc, pathDst string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
index e0ca2b12ec571..63bf8d6d147c3 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
@@ -4,16 +4,104 @@
package backend
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+
+ "cloud.google.com/go/storage"
+)
+
+// ObjectAttrs represents the meta-data without its contents.
+type ObjectAttrs struct {
+ BucketName string `json:"-"`
+ Name string `json:"-"`
+ Size int64 `json:"-"`
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
+ Crc32c string
+ Md5Hash string
+ Etag string
+ ACL []storage.ACLRule
+ Metadata map[string]string
+ Created string
+ Deleted string
+ Updated string
+ CustomTime string
+ Generation int64
+}
+
+// ID is used for comparing objects.
+func (o *ObjectAttrs) ID() string {
+ return fmt.Sprintf("%s#%d", o.IDNoGen(), o.Generation)
+}
+
+// IDNoGen does not consider the generation field.
+func (o *ObjectAttrs) IDNoGen() string {
+ return fmt.Sprintf("%s/%s", o.BucketName, o.Name)
+}
+
// Object represents the object that is stored within the fake server.
type Object struct {
- BucketName string `json:"-"`
- Name string `json:"-"`
- Content []byte
- Crc32c string
- Md5Hash string
+ ObjectAttrs
+ Content []byte
+}
+
+type noopSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n noopSeekCloser) Close() error {
+ return nil
+}
+
+func (o Object) StreamingObject() StreamingObject {
+ return StreamingObject{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: noopSeekCloser{bytes.NewReader(o.Content)},
+ }
+}
+
+type StreamingObject struct {
+ ObjectAttrs
+ Content io.ReadSeekCloser
+}
+
+func (o *StreamingObject) Close() error {
+ if o != nil && o.Content != nil {
+ return o.Content.Close()
+ }
+ return nil
+}
+
+// Convert this StreamingObject to a (buffered) Object.
+func (o *StreamingObject) BufferedObject() (Object, error) {
+ data, err := io.ReadAll(o.Content)
+ return Object{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: data,
+ }, err
}
-// ID is useful for comparing objects
-func (o *Object) ID() string {
- return o.BucketName + "/" + o.Name
+func (o *StreamingObject) patch(attrsToUpdate ObjectAttrs) {
+ currObjValues := reflect.ValueOf(&(o.ObjectAttrs)).Elem()
+ currObjType := currObjValues.Type()
+ newObjValues := reflect.ValueOf(attrsToUpdate)
+ for i := 0; i < newObjValues.NumField(); i++ {
+ if reflect.Value.IsZero(newObjValues.Field(i)) {
+ continue
+ } else if currObjType.Field(i).Name == "Metadata" {
+ if o.Metadata == nil {
+ o.Metadata = map[string]string{}
+ }
+ for k, v := range attrsToUpdate.Metadata {
+ o.Metadata[k] = v
+ }
+ } else {
+ currObjValues.Field(i).Set(newObjValues.Field(i))
+ }
+ }
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
index c77583462fdb5..da8e8e51e2128 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
@@ -2,15 +2,43 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Package backend proides the backends used by fake-gcs-server.
package backend
-// Storage is the generic interface for implementing the backend storage of the server
+type Conditions interface {
+ ConditionsMet(activeGeneration int64) bool
+}
+
+type NoConditions struct{}
+
+func (NoConditions) ConditionsMet(int64) bool {
+ return true
+}
+
+// Storage is the generic interface for implementing the backend storage of the
+// server.
type Storage interface {
- CreateBucket(name string) error
- ListBuckets() ([]string, error)
- GetBucket(name string) error
- CreateObject(obj Object) error
- ListObjects(bucketName string) ([]Object, error)
- GetObject(bucketName, objectName string) (Object, error)
+ CreateBucket(name string, bucketAttrs BucketAttrs) error
+ ListBuckets() ([]Bucket, error)
+ GetBucket(name string) (Bucket, error)
+ UpdateBucket(name string, attrsToUpdate BucketAttrs) error
+ DeleteBucket(name string) error
+ CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error)
+ ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error)
+ GetObject(bucketName, objectName string) (StreamingObject, error)
+ GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error)
DeleteObject(bucketName, objectName string) error
+ PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error)
+ UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error)
+ ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error)
}
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+ BucketNotFound = Error("bucket not found")
+ BucketNotEmpty = Error("bucket must be empty prior to deletion")
+ PreConditionFailed = Error("Precondition failed")
+)
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go
new file mode 100644
index 0000000000000..a01893f6be036
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Stat_t); ok {
+ return statT.Ctimespec
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go
new file mode 100644
index 0000000000000..0f959e9b74c6c
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go
@@ -0,0 +1,18 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Stat_t); ok {
+ // not true: Ctime is not created time, but not creating a file to persist this metadata, yet...
+ return statT.Ctim
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go
new file mode 100644
index 0000000000000..54c7bc9b0badd
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Win32FileAttributeData); ok {
+ nsec := statT.CreationTime.Nanoseconds()
+ return syscall.NsecToTimespec(nsec)
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go
new file mode 100644
index 0000000000000..2e5e510fbc3d4
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go
@@ -0,0 +1,17 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package backend
+
+import (
+ "os"
+
+ "github.com/google/renameio/v2"
+)
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ return renameio.WriteFile(filename, data, perm)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go
new file mode 100644
index 0000000000000..2d6600c803024
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go
@@ -0,0 +1,13 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+)
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ return os.WriteFile(filename, data, perm)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go
new file mode 100644
index 0000000000000..c247336d8e65e
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go
@@ -0,0 +1,70 @@
+// Copyright 2021 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checksum
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "hash"
+ "hash/crc32"
+)
+
+var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+
+func crc32cChecksum(content []byte) []byte {
+ checksummer := crc32.New(crc32cTable)
+ checksummer.Write(content)
+ return checksummer.Sum(make([]byte, 0, 4))
+}
+
+func EncodedChecksum(checksum []byte) string {
+ return base64.StdEncoding.EncodeToString(checksum)
+}
+
+func EncodedCrc32cChecksum(content []byte) string {
+ return EncodedChecksum(crc32cChecksum(content))
+}
+
+func MD5Hash(b []byte) []byte {
+ h := md5.New()
+ h.Write(b)
+ return h.Sum(nil)
+}
+
+func EncodedHash(hash []byte) string {
+ return base64.StdEncoding.EncodeToString(hash)
+}
+
+func EncodedMd5Hash(content []byte) string {
+ return EncodedHash(MD5Hash(content))
+}
+
+type StreamingHasher struct {
+ crc32 hash.Hash32
+ md5 hash.Hash
+}
+
+func NewStreamingHasher() *StreamingHasher {
+ return &StreamingHasher{
+ crc32: crc32.New(crc32cTable),
+ md5: md5.New(),
+ }
+}
+
+func (s *StreamingHasher) Write(p []byte) (n int, err error) {
+ n, err = s.crc32.Write(p)
+ if err != nil {
+ return n, err
+ }
+ return s.md5.Write(p)
+}
+
+func (s *StreamingHasher) EncodedCrc32cChecksum() string {
+ return EncodedChecksum(s.crc32.Sum(nil))
+}
+
+func (s *StreamingHasher) EncodedMd5Hash() string {
+ return EncodedHash(s.md5.Sum(nil))
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go
new file mode 100644
index 0000000000000..f20ac8c87a40a
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go
@@ -0,0 +1,222 @@
+package notification
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/pubsub"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+// EventType is the type of event to trigger. The descriptions of the events
+// can be found here:
+// https://cloud.google.com/storage/docs/pubsub-notifications#events.
+type EventType string
+
+const (
+ // EventFinalize is triggered when an object is added.
+ EventFinalize EventType = "OBJECT_FINALIZE"
+ // EventDelete is triggered when an object is deleted.
+ EventDelete = "OBJECT_DELETE"
+ // EventMetadata is triggered when an object's metadata is changed.
+ EventMetadata = "OBJECT_METADATA_UPDATE"
+ // EventArchive bucket versioning must be enabled. is triggered when an object becomes the non current version
+ EventArchive = "OBJECT_ARCHIVE"
+)
+
+// EventNotificationOptions contains flags for events, that if true, will create
+// trigger notifications when they occur.
+type EventNotificationOptions struct {
+ Finalize bool
+ Delete bool
+ MetadataUpdate bool
+ Archive bool
+}
+
+// EventManagerOptions determines what events are triggered and where.
+type EventManagerOptions struct {
+ // ProjectID is the project ID containing the pubsub topic.
+ ProjectID string
+ // TopicName is the pubsub topic name to publish events on.
+ TopicName string
+ // Bucket is the name of the bucket to publish events from.
+ Bucket string
+ // ObjectPrefix, if not empty, only objects having this prefix will generate
+ // trigger events.
+ ObjectPrefix string
+ // NotifyOn determines what events to trigger.
+ NotifyOn EventNotificationOptions
+}
+
+type EventManager interface {
+ Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string)
+}
+
+// PubsubEventManager checks if an event should be published.
+type PubsubEventManager struct {
+ // publishSynchronously is a flag that if true, events will be published
+ // synchronously and not in a goroutine. It is used during tests to prevent
+ // race conditions.
+ publishSynchronously bool
+ // notifyOn determines what events are triggered.
+ notifyOn EventNotificationOptions
+ // writer is where logs are written to.
+ writer io.Writer
+ // bucket, if not empty, only objects from this bucker will generate trigger events.
+ bucket string
+ // objectPrefix, if not empty, only objects having this prefix will generate
+ // trigger events.
+ objectPrefix string
+ // publisher is used to publish events on.
+ publisher eventPublisher
+}
+
+func NewPubsubEventManager(options EventManagerOptions, w io.Writer) (*PubsubEventManager, error) {
+ manager := &PubsubEventManager{
+ writer: w,
+ notifyOn: options.NotifyOn,
+ bucket: options.Bucket,
+ objectPrefix: options.ObjectPrefix,
+ }
+ if options.ProjectID != "" && options.TopicName != "" {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, options.ProjectID)
+ if err != nil {
+ return nil, fmt.Errorf("error creating pubsub client: %v", err)
+ }
+ manager.publisher = client.Topic(options.TopicName)
+ }
+ return manager, nil
+}
+
+// eventPublisher is the interface to publish triggered events.
+type eventPublisher interface {
+ Publish(ctx context.Context, msg *pubsub.Message) *pubsub.PublishResult
+}
+
+// Trigger checks if an event should be triggered. If so, it publishes the
+// event to a pubsub queue.
+func (m *PubsubEventManager) Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string) {
+ if m.publisher == nil {
+ return
+ }
+ if m.bucket != "" && o.BucketName != m.bucket {
+ return
+ }
+ if m.objectPrefix != "" && !strings.HasPrefix(o.Name, m.objectPrefix) {
+ return
+ }
+ switch eventType {
+ case EventFinalize:
+ if !m.notifyOn.Finalize {
+ return
+ }
+ case EventDelete:
+ if !m.notifyOn.Delete {
+ return
+ }
+ case EventMetadata:
+ if !m.notifyOn.MetadataUpdate {
+ return
+ }
+ case EventArchive:
+ if !m.notifyOn.Archive {
+ return
+ }
+ }
+ eventTime := time.Now().Format(time.RFC3339)
+ publishFunc := func() {
+ err := m.publish(o, eventType, eventTime, extraEventAttr)
+ if m.writer != nil {
+ if err != nil {
+ fmt.Fprintf(m.writer, "error publishing event: %v", err)
+ } else {
+ fmt.Fprintf(m.writer, "sent event %s for object %s\n", string(eventType), o.ID())
+ }
+ }
+ }
+ if m.publishSynchronously {
+ publishFunc()
+ } else {
+ go publishFunc()
+ }
+}
+
+func (m *PubsubEventManager) publish(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) error {
+ ctx := context.Background()
+ data, attributes, err := generateEvent(o, eventType, eventTime, extraEventAttr)
+ if err != nil {
+ return err
+ }
+ if r := m.publisher.Publish(ctx, &pubsub.Message{
+ Data: data,
+ Attributes: attributes,
+ }); r != nil {
+ _, err = r.Get(ctx)
+ return err
+ }
+ return nil
+}
+
+// gcsEvent is the payload of a GCS event. Note that all properties are string-quoted.
+// The description of the full object can be found here:
+// https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations.
+type gcsEvent struct {
+ Kind string `json:"kind"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Bucket string `json:"bucket"`
+ Generation int64 `json:"generation,string,omitempty"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+ Created string `json:"timeCreated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ StorageClass string `json:"storageClass"`
+ Size int64 `json:"size,string"`
+ MD5Hash string `json:"md5Hash,omitempty"`
+ CRC32c string `json:"crc32c,omitempty"`
+ MetaData map[string]string `json:"metadata,omitempty"`
+}
+
+func generateEvent(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) ([]byte, map[string]string, error) {
+ payload := gcsEvent{
+ Kind: "storage#object",
+ ID: o.ID(),
+ Name: o.Name,
+ Bucket: o.BucketName,
+ Generation: o.Generation,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ Created: o.Created,
+ Updated: o.Updated,
+ StorageClass: "STANDARD",
+ Size: o.Size,
+ MD5Hash: o.Md5Hash,
+ CRC32c: o.Crc32c,
+ MetaData: o.Metadata,
+ }
+ attributes := map[string]string{
+ "bucketId": o.BucketName,
+ "eventTime": eventTime,
+ "eventType": string(eventType),
+ "objectGeneration": strconv.FormatInt(o.Generation, 10),
+ "objectId": o.Name,
+ "payloadFormat": "JSON_API_V1",
+ }
+ for k, v := range extraEventAttr {
+ if _, exists := attributes[k]; exists {
+ return nil, nil, fmt.Errorf("cannot overwrite duplicate event attribute %s", k)
+ }
+ attributes[k] = v
+ }
+ data, err := json.Marshal(&payload)
+ if err != nil {
+ return nil, nil, err
+ }
+ return data, attributes, nil
+}
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
index 969b910d706d4..6f5184fef7d8e 100644
--- a/vendor/github.com/google/btree/btree.go
+++ b/vendor/github.com/google/btree/btree.go
@@ -479,7 +479,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove)
child := n.mutableChild(i)
// merge with right child
mergeItem := n.items.removeAt(i)
- mergeChild := n.children.removeAt(i + 1)
+ mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig
new file mode 100644
index 0000000000000..c6b74c3e0d0c7
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore
new file mode 100644
index 0000000000000..577a89e813831
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.gitignore
@@ -0,0 +1,2 @@
+# Output of the go test coverage tool
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE
new file mode 100644
index 0000000000000..bb9d80bc9b6bc
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile
new file mode 100644
index 0000000000000..003b784f7edbf
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: verify
+verify: sec govulncheck lint test
+
+.PHONY: lint
+lint:
+ $(if $(GO_LINT), ,go install $(GO_LINT_URI))
+ @echo "##### Running golangci-lint #####"
+ golangci-lint run -v
+
+.PHONY: sec
+sec:
+ $(if $(GO_SEC), ,go install $(GO_SEC_URI))
+ @echo "##### Running gosec #####"
+ gosec ./...
+
+.PHONY: govulncheck
+govulncheck:
+ $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+ @echo "##### Running govulncheck #####"
+ govulncheck ./...
+
+.PHONY: test
+test:
+ @echo "##### Running tests #####"
+ go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md
new file mode 100644
index 0000000000000..02555b2642c5f
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/README.md
@@ -0,0 +1,56 @@
+# gorilla/handlers
+
+
+[](https://codecov.io/github/gorilla/handlers)
+[](https://godoc.org/github.com/gorilla/handlers)
+[](https://sourcegraph.com/github.com/gorilla/handlers?badge)
+
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's `net/http` package (or any framework supporting `http.Handler`), including:
+
+* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#common).
+* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
+ both Apache and nginx.
+* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
+* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
+ content types.
+* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
+ `map[string]http.Handler`
+* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
+ `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
+ headers when running a Go server behind a HTTP reverse proxy.
+* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple
+ domains (i.e. multiple CNAME aliases).
+* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
+
+Other handlers are documented [on the Gorilla
+website](https://www.gorillatoolkit.org/pkg/handlers).
+
+## Example
+
+A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
+
+```go
+import (
+ "net/http"
+ "github.com/gorilla/handlers"
+)
+
+func main() {
+ r := http.NewServeMux()
+
+ // Only log requests to our admin dashboard to stdout
+ r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
+ r.HandleFunc("/", ShowIndex)
+
+ // Wrap our server with our gzip handler to gzip compress all responses.
+ http.ListenAndServe(":8000", handlers.CompressHandler(r))
+}
+```
+
+## License
+
+BSD licensed. See the included LICENSE file for details.
+
diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go
new file mode 100644
index 0000000000000..7121f5307bec9
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/canonical.go
@@ -0,0 +1,73 @@
+package handlers
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type canonical struct {
+ h http.Handler
+ domain string
+ code int
+}
+
+// CanonicalHost is HTTP middleware that re-directs requests to the canonical
+// domain. It accepts a domain and a status code (e.g. 301 or 302) and
+// re-directs clients to this domain. The existing request path is maintained.
+//
+// Note: If the provided domain is considered invalid by url.Parse or otherwise
+// returns an empty scheme or host, clients are not re-directed.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
+// r.HandleFunc("/route", YourHandler)
+//
+// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
+func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
+ fn := func(h http.Handler) http.Handler {
+ return canonical{h, domain, code}
+ }
+
+ return fn
+}
+
+func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ dest, err := url.Parse(c.domain)
+ if err != nil {
+ // Call the next handler if the provided domain fails to parse.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if dest.Scheme == "" || dest.Host == "" {
+ // Call the next handler if the scheme or host are empty.
+ // Note that url.Parse won't fail on in this case.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if !strings.EqualFold(cleanHost(r.Host), dest.Host) {
+ // Re-build the destination URL
+ dest := dest.Scheme + "://" + dest.Host + r.URL.Path
+ if r.URL.RawQuery != "" {
+ dest += "?" + r.URL.RawQuery
+ }
+ http.Redirect(w, r, dest, c.code)
+ return
+ }
+
+ c.h.ServeHTTP(w, r)
+}
+
+// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '.
+// This is backported from Go 1.5 (in response to issue #11206) and attempts to
+// mitigate malformed Host headers that do not match the format in RFC7230.
+func cleanHost(in string) string {
+ if i := strings.IndexAny(in, " /"); i != -1 {
+ return in[:i]
+ }
+ return in
+}
diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go
new file mode 100644
index 0000000000000..d6f589503b5ea
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/compress.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/felixge/httpsnoop"
+)
+
+const acceptEncoding string = "Accept-Encoding"
+
+type compressResponseWriter struct {
+ compressor io.Writer
+ w http.ResponseWriter
+}
+
+func (cw *compressResponseWriter) WriteHeader(c int) {
+ cw.w.Header().Del("Content-Length")
+ cw.w.WriteHeader(c)
+}
+
+func (cw *compressResponseWriter) Write(b []byte) (int, error) {
+ h := cw.w.Header()
+ if h.Get("Content-Type") == "" {
+ h.Set("Content-Type", http.DetectContentType(b))
+ }
+ h.Del("Content-Length")
+
+ return cw.compressor.Write(b)
+}
+
+func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) {
+ return io.Copy(cw.compressor, r)
+}
+
+type flusher interface {
+ Flush() error
+}
+
+func (cw *compressResponseWriter) Flush() {
+ // Flush compressed data if compressor supports it.
+ if f, ok := cw.compressor.(flusher); ok {
+ _ = f.Flush()
+ }
+ // Flush HTTP response.
+ if f, ok := cw.w.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// CompressHandler gzip compresses HTTP responses for clients that support it
+// via the 'Accept-Encoding' header.
+//
+// Compressing TLS traffic may leak the page contents to an attacker if the
+// page contains user input: http://security.stackexchange.com/a/102015/12208
+func CompressHandler(h http.Handler) http.Handler {
+ return CompressHandlerLevel(h, gzip.DefaultCompression)
+}
+
+// CompressHandlerLevel gzip compresses HTTP responses with specified compression level
+// for clients that support it via the 'Accept-Encoding' header.
+//
+// The compression level should be gzip.DefaultCompression, gzip.NoCompression,
+// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive.
+// gzip.DefaultCompression is used in case of invalid compression level.
+func CompressHandlerLevel(h http.Handler, level int) http.Handler {
+ if level < gzip.DefaultCompression || level > gzip.BestCompression {
+ level = gzip.DefaultCompression
+ }
+
+ const (
+ gzipEncoding = "gzip"
+ flateEncoding = "deflate"
+ )
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // detect what encoding to use
+ var encoding string
+ for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") {
+ curEnc = strings.TrimSpace(curEnc)
+ if curEnc == gzipEncoding || curEnc == flateEncoding {
+ encoding = curEnc
+ break
+ }
+ }
+
+ // always add Accept-Encoding to Vary to prevent intermediate caches corruption
+ w.Header().Add("Vary", acceptEncoding)
+
+ // if we weren't able to identify an encoding we're familiar with, pass on the
+ // request to the handler and return
+ if encoding == "" {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ if r.Header.Get("Upgrade") != "" {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ // wrap the ResponseWriter with the writer for the chosen encoding
+ var encWriter io.WriteCloser
+ if encoding == gzipEncoding {
+ encWriter, _ = gzip.NewWriterLevel(w, level)
+ } else if encoding == flateEncoding {
+ encWriter, _ = flate.NewWriter(w, level)
+ }
+ defer encWriter.Close()
+
+ w.Header().Set("Content-Encoding", encoding)
+ r.Header.Del(acceptEncoding)
+
+ cw := &compressResponseWriter{
+ w: w,
+ compressor: encWriter,
+ }
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return cw.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return cw.WriteHeader
+ },
+ Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
+ return cw.Flush
+ },
+ ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc {
+ return cw.ReadFrom
+ },
+ })
+
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go
new file mode 100644
index 0000000000000..8af9c096e5e40
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/cors.go
@@ -0,0 +1,352 @@
+package handlers
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// CORSOption represents a functional option for configuring the CORS middleware.
+type CORSOption func(*cors) error
+
+type cors struct {
+ h http.Handler
+ allowedHeaders []string
+ allowedMethods []string
+ allowedOrigins []string
+ allowedOriginValidator OriginValidator
+ exposedHeaders []string
+ maxAge int
+ ignoreOptions bool
+ allowCredentials bool
+ optionStatusCode int
+}
+
+// OriginValidator takes an origin string and returns whether or not that origin is allowed.
+type OriginValidator func(string) bool
+
+var (
+ defaultCorsOptionStatusCode = http.StatusOK
+ defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost}
+ defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
+ // (WebKit/Safari v9 sends the Origin header by default in AJAX requests).
+)
+
+const (
+ corsOptionMethod string = http.MethodOptions
+ corsAllowOriginHeader string = "Access-Control-Allow-Origin"
+ corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
+ corsMaxAgeHeader string = "Access-Control-Max-Age"
+ corsAllowMethodsHeader string = "Access-Control-Allow-Methods"
+ corsAllowHeadersHeader string = "Access-Control-Allow-Headers"
+ corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials"
+ corsRequestMethodHeader string = "Access-Control-Request-Method"
+ corsRequestHeadersHeader string = "Access-Control-Request-Headers"
+ corsOriginHeader string = "Origin"
+ corsVaryHeader string = "Vary"
+ corsOriginMatchAll string = "*"
+)
+
+func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ origin := r.Header.Get(corsOriginHeader)
+ if !ch.isOriginAllowed(origin) {
+ if r.Method != corsOptionMethod || ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ }
+
+ return
+ }
+
+ if r.Method == corsOptionMethod {
+ if ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ return
+ }
+
+ if _, ok := r.Header[corsRequestMethodHeader]; !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ method := r.Header.Get(corsRequestMethodHeader)
+ if !ch.isMatch(method, ch.allowedMethods) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",")
+ allowedHeaders := []string{}
+ for _, v := range requestHeaders {
+ canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) {
+ continue
+ }
+
+ if !ch.isMatch(canonicalHeader, ch.allowedHeaders) {
+ w.WriteHeader(http.StatusForbidden)
+ return
+ }
+
+ allowedHeaders = append(allowedHeaders, canonicalHeader)
+ }
+
+ if len(allowedHeaders) > 0 {
+ w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ","))
+ }
+
+ if ch.maxAge > 0 {
+ w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge))
+ }
+
+ if !ch.isMatch(method, defaultCorsMethods) {
+ w.Header().Set(corsAllowMethodsHeader, method)
+ }
+ } else if len(ch.exposedHeaders) > 0 {
+ w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
+ }
+
+ if ch.allowCredentials {
+ w.Header().Set(corsAllowCredentialsHeader, "true")
+ }
+
+ if len(ch.allowedOrigins) > 1 {
+ w.Header().Set(corsVaryHeader, corsOriginHeader)
+ }
+
+ returnOrigin := origin
+ if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 {
+ returnOrigin = "*"
+ } else {
+ for _, o := range ch.allowedOrigins {
+ // A configuration of * is different than explicitly setting an allowed
+ // origin. Returning arbitrary origin headers in an access control allow
+ // origin header is unsafe and is not required by any use case.
+ if o == corsOriginMatchAll {
+ returnOrigin = "*"
+ break
+ }
+ }
+ }
+ w.Header().Set(corsAllowOriginHeader, returnOrigin)
+
+ if r.Method == corsOptionMethod {
+ w.WriteHeader(ch.optionStatusCode)
+ return
+ }
+ ch.h.ServeHTTP(w, r)
+}
+
+// CORS provides Cross-Origin Resource Sharing middleware.
+// Example:
+//
+// import (
+// "net/http"
+//
+// "github.com/gorilla/handlers"
+// "github.com/gorilla/mux"
+// )
+//
+// func main() {
+// r := mux.NewRouter()
+// r.HandleFunc("/users", UserEndpoint)
+// r.HandleFunc("/projects", ProjectEndpoint)
+//
+// // Apply the CORS middleware to our top-level router, with the defaults.
+// http.ListenAndServe(":8000", handlers.CORS()(r))
+// }
+func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ ch := parseCORSOptions(opts...)
+ ch.h = h
+ return ch
+ }
+}
+
+func parseCORSOptions(opts ...CORSOption) *cors {
+ ch := &cors{
+ allowedMethods: defaultCorsMethods,
+ allowedHeaders: defaultCorsHeaders,
+ allowedOrigins: []string{},
+ optionStatusCode: defaultCorsOptionStatusCode,
+ }
+
+ for _, option := range opts {
+ _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil?
+ }
+
+ return ch
+}
+
+//
+// Functional options for configuring CORS.
+//
+
+// AllowedHeaders adds the provided headers to the list of allowed headers in a
+// CORS request.
+// This is an append operation so the headers Accept, Accept-Language,
+// and Content-Language are always allowed.
+// Content-Type must be explicitly declared if accepting Content-Types other than
+// application/x-www-form-urlencoded, multipart/form-data, or text/plain.
+func AllowedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.allowedHeaders) {
+ ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedMethods can be used to explicitly allow methods in the
+// Access-Control-Allow-Methods header.
+// This is a replacement operation so you must also
+// pass GET, HEAD, and POST if you wish to support those methods.
+func AllowedMethods(methods []string) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedMethods = []string{}
+ for _, v := range methods {
+ normalizedMethod := strings.ToUpper(strings.TrimSpace(v))
+ if normalizedMethod == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedMethod, ch.allowedMethods) {
+ ch.allowedMethods = append(ch.allowedMethods, normalizedMethod)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedOrigins sets the allowed origins for CORS requests, as used in the
+// 'Allow-Access-Control-Origin' HTTP header.
+// Note: Passing in a []string{"*"} will allow any domain.
+func AllowedOrigins(origins []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range origins {
+ if v == corsOriginMatchAll {
+ ch.allowedOrigins = []string{corsOriginMatchAll}
+ return nil
+ }
+ }
+
+ ch.allowedOrigins = origins
+ return nil
+ }
+}
+
+// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the
+// 'Allow-Access-Control-Origin' HTTP header.
+func AllowedOriginValidator(fn OriginValidator) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedOriginValidator = fn
+ return nil
+ }
+}
+
+// OptionStatusCode sets a custom status code on the OPTIONS requests.
+// Default behaviour sets it to 200 to reflect best practices. This is option is not mandatory
+// and can be used if you need a custom status code (i.e 204).
+//
+// More informations on the spec:
+// https://fetch.spec.whatwg.org/#cors-preflight-fetch
+func OptionStatusCode(code int) CORSOption {
+ return func(ch *cors) error {
+ ch.optionStatusCode = code
+ return nil
+ }
+}
+
+// ExposedHeaders can be used to specify headers that are available
+// and will not be stripped out by the user-agent.
+func ExposedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ ch.exposedHeaders = []string{}
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.exposedHeaders) {
+ ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// MaxAge determines the maximum age (in seconds) between preflight requests. A
+// maximum of 10 minutes is allowed. An age above this value will default to 10
+// minutes.
+func MaxAge(age int) CORSOption {
+ return func(ch *cors) error {
+ // Maximum of 10 minutes.
+ if age > 600 {
+ age = 600
+ }
+
+ ch.maxAge = age
+ return nil
+ }
+}
+
+// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead
+// passing them through to the next handler. This is useful when your application
+// or framework has a pre-existing mechanism for responding to OPTIONS requests.
+func IgnoreOptions() CORSOption {
+ return func(ch *cors) error {
+ ch.ignoreOptions = true
+ return nil
+ }
+}
+
+// AllowCredentials can be used to specify that the user agent may pass
+// authentication details along with the request.
+func AllowCredentials() CORSOption {
+ return func(ch *cors) error {
+ ch.allowCredentials = true
+ return nil
+ }
+}
+
+func (ch *cors) isOriginAllowed(origin string) bool {
+ if origin == "" {
+ return false
+ }
+
+ if ch.allowedOriginValidator != nil {
+ return ch.allowedOriginValidator(origin)
+ }
+
+ if len(ch.allowedOrigins) == 0 {
+ return true
+ }
+
+ for _, allowedOrigin := range ch.allowedOrigins {
+ if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ch *cors) isMatch(needle string, haystack []string) bool {
+ for _, v := range haystack {
+ if v == needle {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go
new file mode 100644
index 0000000000000..944e5a8ae9982
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/doc.go
@@ -0,0 +1,9 @@
+/*
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's net/http package (or any framework supporting http.Handler).
+
+The package includes handlers for logging in standardised formats, compressing
+HTTP responses, validating content types and other useful tools for manipulating
+requests and responses.
+*/
+package handlers
diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go
new file mode 100644
index 0000000000000..9b92fce3333e7
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/handlers.go
@@ -0,0 +1,150 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// MethodHandler is an http.Handler that dispatches to a handler whose key in the
+// MethodHandler's map matches the name of the HTTP request's method, eg: GET
+//
+// If the request's method is OPTIONS and OPTIONS is not a key in the map then
+// the handler responds with a status of 200 and sets the Allow header to a
+// comma-separated list of available methods.
+//
+// If the request's method doesn't match any of its keys the handler responds
+// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a
+// comma-separated list of available methods.
+type MethodHandler map[string]http.Handler
+
+func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if handler, ok := h[req.Method]; ok {
+ handler.ServeHTTP(w, req)
+ } else {
+ allow := []string{}
+ for k := range h {
+ allow = append(allow, k)
+ }
+ sort.Strings(allow)
+ w.Header().Set("Allow", strings.Join(allow, ", "))
+ if req.Method == http.MethodOptions {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ }
+ }
+}
+
+// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
+// status code and body size.
+type responseLogger struct {
+ w http.ResponseWriter
+ status int
+ size int
+}
+
+func (l *responseLogger) Write(b []byte) (int, error) {
+ size, err := l.w.Write(b)
+ l.size += size
+ return size, err
+}
+
+func (l *responseLogger) WriteHeader(s int) {
+ l.w.WriteHeader(s)
+ l.status = s
+}
+
+func (l *responseLogger) Status() int {
+ return l.status
+}
+
+func (l *responseLogger) Size() int {
+ return l.size
+}
+
+func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ conn, rw, err := l.w.(http.Hijacker).Hijack()
+ if err == nil && l.status == 0 {
+ // The status will be StatusSwitchingProtocols if there was no error and
+ // WriteHeader has not been called yet
+ l.status = http.StatusSwitchingProtocols
+ }
+ return conn, rw, err
+}
+
+// isContentType validates the Content-Type header matches the supplied
+// contentType. That is, its type and subtype match.
+func isContentType(h http.Header, contentType string) bool {
+ ct := h.Get("Content-Type")
+ if i := strings.IndexRune(ct, ';'); i != -1 {
+ ct = ct[0:i]
+ }
+ return ct == contentType
+}
+
+// ContentTypeHandler wraps and returns a http.Handler, validating the request
+// content type is compatible with the contentTypes list. It writes a HTTP 415
+// error if that fails.
+//
+// Only PUT, POST, and PATCH requests are considered.
+func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ for _, ct := range contentTypes {
+ if isContentType(r.Header, ct) {
+ h.ServeHTTP(w, r)
+ return
+ }
+ }
+ http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q",
+ r.Header.Get("Content-Type"),
+ contentTypes),
+ http.StatusUnsupportedMediaType)
+ })
+}
+
+const (
+ // HTTPMethodOverrideHeader is a commonly used
+ // http header to override a request method.
+ HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
+ // HTTPMethodOverrideFormKey is a commonly used
+ // HTML form key to override a request method.
+ HTTPMethodOverrideFormKey = "_method"
+)
+
+// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for
+// the X-HTTP-Method-Override header or the _method form key, and overrides (if
+// valid) request.Method with its value.
+//
+// This is especially useful for HTTP clients that don't support many http verbs.
+// It isn't secure to override e.g a GET to a POST, so only POST requests are
+// considered. Likewise, the override method can only be a "write" method: PUT,
+// PATCH or DELETE.
+//
+// Form method takes precedence over header method.
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodPost {
+ om := r.FormValue(HTTPMethodOverrideFormKey)
+ if om == "" {
+ om = r.Header.Get(HTTPMethodOverrideHeader)
+ }
+ if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete {
+ r.Method = om
+ }
+ }
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go
new file mode 100644
index 0000000000000..2badb6fbff844
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/logging.go
@@ -0,0 +1,246 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+ "unicode/utf8"
+
+ "github.com/felixge/httpsnoop"
+)
+
+// Logging
+
+// LogFormatterParams is the structure any formatter will be handed when time to log comes.
+type LogFormatterParams struct {
+ Request *http.Request
+ URL url.URL
+ TimeStamp time.Time
+ StatusCode int
+ Size int
+}
+
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler.
+type LogFormatter func(writer io.Writer, params LogFormatterParams)
+
+// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
+// friends
+
+type loggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+ formatter LogFormatter
+}
+
+func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ logger, w := makeLogger(w)
+ url := *req.URL
+
+ h.handler.ServeHTTP(w, req)
+ if req.MultipartForm != nil {
+ err := req.MultipartForm.RemoveAll()
+ if err != nil {
+ return
+ }
+ }
+
+ params := LogFormatterParams{
+ Request: req,
+ URL: url,
+ TimeStamp: t,
+ StatusCode: logger.Status(),
+ Size: logger.Size(),
+ }
+
+ h.formatter(h.writer, params)
+}
+
+func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) {
+ logger := &responseLogger{w: w, status: http.StatusOK}
+ return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return logger.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return logger.WriteHeader
+ },
+ })
+}
+
+const lowerhex = "0123456789abcdef"
+
+func appendQuoted(buf []byte, s string) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if r == rune('"') || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ continue
+ }
+ if strconv.IsPrint(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ continue
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ':
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ return buf
+}
+
+// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
+ username := "-"
+ if url.User != nil {
+ if name := url.User.Username(); name != "" {
+ username = name
+ }
+ }
+
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+
+ uri := req.RequestURI
+
+ // Requests using the CONNECT method over HTTP/2.0 must use
+ // the authority field (aka r.Host) to identify the target.
+ // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
+ if req.ProtoMajor == 2 && req.Method == "CONNECT" {
+ uri = req.Host
+ }
+ if uri == "" {
+ uri = url.RequestURI()
+ }
+
+ buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
+ buf = append(buf, host...)
+ buf = append(buf, " - "...)
+ buf = append(buf, username...)
+ buf = append(buf, " ["...)
+ buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
+ buf = append(buf, `] "`...)
+ buf = append(buf, req.Method...)
+ buf = append(buf, " "...)
+ buf = appendQuoted(buf, uri)
+ buf = append(buf, " "...)
+ buf = append(buf, req.Proto...)
+ buf = append(buf, `" `...)
+ buf = append(buf, strconv.Itoa(status)...)
+ buf = append(buf, " "...)
+ buf = append(buf, strconv.Itoa(size)...)
+ return buf
+}
+
+// writeLog writes a log entry for req to w in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, '\n')
+ _, _ = writer.Write(buf)
+}
+
+// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, ` "`...)
+ buf = appendQuoted(buf, params.Request.Referer())
+ buf = append(buf, `" "`...)
+ buf = appendQuoted(buf, params.Request.UserAgent())
+ buf = append(buf, '"', '\n')
+ _, _ = writer.Write(buf)
+}
+
+// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in
+// Apache Combined Log Format.
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -.
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeCombinedLog}
+}
+
+// LoggingHandler return a http.Handler that wraps h and logs requests to out in
+// Apache Common Log Format (CLF).
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("This is a catch-all route"))
+// })
+// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
+// http.ListenAndServe(":1123", loggedRouter)
+func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeLog}
+}
+
+// CustomLoggingHandler provides a way to supply a custom log formatter
+// while taking advantage of the mechanisms in this package.
+func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
+ return loggingHandler{out, h, f}
+}
diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go
new file mode 100644
index 0000000000000..281d753e95a28
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/proxy_headers.go
@@ -0,0 +1,120 @@
+package handlers
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+var (
+ // De-facto standard header keys.
+ xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+ xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
+ xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
+ xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
+ xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+)
+
+var (
+ // RFC7239 defines a new "Forwarded: " header designed to replace the
+ // existing use of X-Forwarded-* headers.
+ // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
+ forwarded = http.CanonicalHeaderKey("Forwarded")
+ // Allows for a sub-match of the first value after 'for=' to the next
+ // comma, semi-colon or space. The match is case-insensitive.
+ forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
+ // Allows for a sub-match for the first instance of scheme (http|https)
+ // prefixed by 'proto='. The match is case-insensitive.
+ protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`)
+)
+
+// ProxyHeaders inspects common reverse proxy headers and sets the corresponding
+// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP
+// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme
+// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239
+// Forwarded header, which may include both client IPs and schemes.
+//
+// NOTE: This middleware should only be used when behind a reverse
+// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are
+// configured not to) strip these headers from client requests, or where these
+// headers are accepted "as is" from a remote client (e.g. when Go is not behind
+// a proxy), can manifest as a vulnerability if your application uses these
+// headers for validating the 'trustworthiness' of a request.
+func ProxyHeaders(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ // Set the remote IP with the value passed from the proxy.
+ if fwd := getIP(r); fwd != "" {
+ r.RemoteAddr = fwd
+ }
+
+ // Set the scheme (proto) with the value passed from the proxy.
+ if scheme := getScheme(r); scheme != "" {
+ r.URL.Scheme = scheme
+ }
+ // Set the host with the value passed by the proxy
+ if r.Header.Get(xForwardedHost) != "" {
+ r.Host = r.Header.Get(xForwardedHost)
+ }
+ // Call the next handler in the chain.
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
+// Forwarded headers (in that order).
+func getIP(r *http.Request) string {
+ var addr string
+
+ switch {
+ case r.Header.Get(xForwardedFor) != "":
+ fwd := r.Header.Get(xForwardedFor)
+ // Only grab the first (client) address. Note that '192.168.0.1,
+ // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
+ // the first may represent forwarding proxies earlier in the chain.
+ s := strings.Index(fwd, ", ")
+ if s == -1 {
+ s = len(fwd)
+ }
+ addr = fwd[:s]
+ case r.Header.Get(xRealIP) != "":
+ addr = r.Header.Get(xRealIP)
+ case r.Header.Get(forwarded) != "":
+ // match should contain at least two elements if the protocol was
+ // specified in the Forwarded header. The first element will always be
+ // the 'for=' capture, which we ignore. In the case of multiple IP
+ // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
+ // extract the first, which should be the client IP.
+ if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 {
+ // IPv6 addresses in Forwarded headers are quoted-strings. We strip
+ // these quotes.
+ addr = strings.Trim(match[1], `"`)
+ }
+ }
+
+ return addr
+}
+
+// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
+// Forwarded headers (in that order).
+func getScheme(r *http.Request) string {
+ var scheme string
+
+ // Retrieve the scheme from X-Forwarded-Proto.
+ if proto := r.Header.Get(xForwardedProto); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(xForwardedScheme); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(forwarded); proto != "" {
+ // match should contain at least two elements if the protocol was
+ // specified in the Forwarded header. The first element will always be
+ // the 'proto=' capture, which we ignore. In the case of multiple proto
+ // parameters (invalid) we only extract the first.
+ if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 {
+ scheme = strings.ToLower(match[1])
+ }
+ }
+
+ return scheme
+}
diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go
new file mode 100644
index 0000000000000..0d4f955ecbda0
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/recovery.go
@@ -0,0 +1,98 @@
+package handlers
+
+import (
+ "log"
+ "net/http"
+ "runtime/debug"
+)
+
+// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
+type RecoveryHandlerLogger interface {
+ Println(...interface{})
+}
+
+type recoveryHandler struct {
+ handler http.Handler
+ logger RecoveryHandlerLogger
+ printStack bool
+}
+
+// RecoveryOption provides a functional approach to define
+// configuration for a handler; such as setting the logging
+// whether or not to print stack traces on panic.
+type RecoveryOption func(http.Handler)
+
+func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
+ for _, option := range opts {
+ option(h)
+ }
+
+ return h
+}
+
+// RecoveryHandler is HTTP middleware that recovers from a panic,
+// logs the panic, writes http.StatusInternalServerError, and
+// continues to the next handler.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// panic("Unexpected error!")
+// })
+//
+// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
+func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ r := &recoveryHandler{handler: h}
+ return parseRecoveryOptions(r, opts...)
+ }
+}
+
+// RecoveryLogger is a functional option to override
+// the default logger.
+func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
+ r.logger = logger
+ }
+}
+
+// PrintRecoveryStack is a functional option to enable
+// or disable printing stack traces on panic.
+func PrintRecoveryStack(shouldPrint bool) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
+ r.printStack = shouldPrint
+ }
+}
+
+func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ h.log(err)
+ }
+ }()
+
+ h.handler.ServeHTTP(w, req)
+}
+
+func (h recoveryHandler) log(v ...interface{}) {
+ if h.logger != nil {
+ h.logger.Println(v...)
+ } else {
+ log.Println(v...)
+ }
+
+ if h.printStack {
+ stack := string(debug.Stack())
+ if h.logger != nil {
+ h.logger.Println(stack)
+ } else {
+ log.Println(stack)
+ }
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index a22953805c633..4528059ca6815 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
before:
hooks:
- ./gen.sh
@@ -99,7 +99,7 @@ archives:
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ .Tag }}-next"
+ version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 684a30853ab64..de264c85a5ad4 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,13 @@ This package provides various compression algorithms.
# changelog
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+ * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+ * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+ * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+ * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+ * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
index 0c9088adfee0c..20b802270a717 100644
--- a/vendor/github.com/klauspost/compress/s2/encode.go
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -9,6 +9,9 @@ import (
"encoding/binary"
"math"
"math/bits"
+ "sync"
+
+ "github.com/klauspost/compress/internal/race"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
@@ -52,6 +55,8 @@ func Encode(dst, src []byte) []byte {
return dst[:d]
}
+var estblockPool [2]sync.Pool
+
// EstimateBlockSize will perform a very fast compression
// without outputting the result and return the compressed output size.
// The function returns -1 if no improvement could be achieved.
@@ -61,9 +66,25 @@ func EstimateBlockSize(src []byte) (d int) {
return -1
}
if len(src) <= 1024 {
- d = calcBlockSizeSmall(src)
+ const sz, pool = 2048, 0
+ tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer estblockPool[pool].Put(tmp)
+
+ d = calcBlockSizeSmall(src, tmp)
} else {
- d = calcBlockSize(src)
+ const sz, pool = 32768, 1
+ tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer estblockPool[pool].Put(tmp)
+
+ d = calcBlockSize(src, tmp)
}
if d == 0 {
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
index 4f45206a4ef49..7aadd255fe3b9 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -3,10 +3,16 @@
package s2
-import "github.com/klauspost/compress/internal/race"
+import (
+ "sync"
+
+ "github.com/klauspost/compress/internal/race"
+)
const hasAmd64Asm = true
+var encPools [4]sync.Pool
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@@ -29,23 +35,60 @@ func encodeBlock(dst, src []byte) (d int) {
)
if len(src) >= 4<<20 {
- return encodeBlockAsm(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeBlockAsm4MB(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm4MB(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeBlockAsm12B(dst, src)
+ const sz, pool = 16384, 1
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeBlockAsm10B(dst, src)
+ const sz, pool = 4096, 2
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeBlockAsm8B(dst, src)
+ const sz, pool = 1024, 3
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm8B(dst, src, tmp)
}
+var encBetterPools [5]sync.Pool
+
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@@ -68,21 +111,59 @@ func encodeBlockBetter(dst, src []byte) (d int) {
)
if len(src) > 4<<20 {
- return encodeBetterBlockAsm(dst, src)
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeBetterBlockAsm4MB(dst, src)
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeBetterBlockAsm4MB(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeBetterBlockAsm12B(dst, src)
+ const sz, pool = 81920, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeBetterBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeBetterBlockAsm10B(dst, src)
+ const sz, pool = 20480, 1
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeBetterBlockAsm8B(dst, src)
+
+ const sz, pool = 5120, 2
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm8B(dst, src, tmp)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -105,22 +186,57 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
// Use 8 bit table when less than...
limit8B = 512
)
- if len(src) >= 64<<10 {
- return encodeSnappyBlockAsm(dst, src)
+ if len(src) > 65536 {
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeSnappyBlockAsm64K(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm64K(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeSnappyBlockAsm12B(dst, src)
+ const sz, pool = 16384, 1
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeSnappyBlockAsm10B(dst, src)
+ const sz, pool = 4096, 2
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeSnappyBlockAsm8B(dst, src)
+ const sz, pool = 1024, 3
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm8B(dst, src, tmp)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -143,20 +259,59 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) {
// Use 8 bit table when less than...
limit8B = 512
)
- if len(src) >= 64<<10 {
- return encodeSnappyBetterBlockAsm(dst, src)
+ if len(src) > 65536 {
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm(dst, src, tmp)
}
+
if len(src) >= limit12B {
- return encodeSnappyBetterBlockAsm64K(dst, src)
+ const sz, pool = 294912, 4
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeSnappyBetterBlockAsm64K(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeSnappyBetterBlockAsm12B(dst, src)
+ const sz, pool = 81920, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeSnappyBetterBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeSnappyBetterBlockAsm10B(dst, src)
+ const sz, pool = 20480, 1
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeSnappyBetterBlockAsm8B(dst, src)
+
+ const sz, pool = 5120, 2
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm8B(dst, src, tmp)
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
index 6b393c34d376c..dd1c973ca51bf 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_go.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -317,7 +317,7 @@ func matchLen(a []byte, b []byte) int {
}
// input must be > inputMargin
-func calcBlockSize(src []byte) (d int) {
+func calcBlockSize(src []byte, _ *[32768]byte) (d int) {
// Initialize the hash table.
const (
tableBits = 13
@@ -503,7 +503,7 @@ emitRemainder:
}
// length must be > inputMargin.
-func calcBlockSizeSmall(src []byte) (d int) {
+func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) {
// Initialize the hash table.
const (
tableBits = 9
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index 297e41501ba76..f43aa8154355a 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -11,154 +11,154 @@ func _dummy_()
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm(dst []byte, src []byte) int
+func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm4MB(dst []byte, src []byte) int
+func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int
// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm12B(dst []byte, src []byte) int
+func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm10B(dst []byte, src []byte) int
+func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm8B(dst []byte, src []byte) int
+func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm(dst []byte, src []byte) int
+func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int
// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm(dst []byte, src []byte) int
+func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func calcBlockSize(src []byte) int
+func calcBlockSize(src []byte, tmp *[32768]byte) int
// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 1024 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func calcBlockSizeSmall(src []byte) int
+func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 2ff5b334017aa..df9be687be7a8 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -13,1270 +13,1271 @@ TEXT ·_dummy_(SB), $0
#endif
RET
-// func encodeBlockAsm(dst []byte, src []byte) int
+// func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm
repeat_extend_back_loop_encodeBlockAsm:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm
repeat_extend_back_end_encodeBlockAsm:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
four_bytes_repeat_emit_encodeBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
three_bytes_repeat_emit_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
two_bytes_repeat_emit_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm
JMP memmove_long_repeat_emit_encodeBlockAsm
one_byte_repeat_emit_encodeBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm
memmove_long_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm
matchlen_bsf_16repeat_extend_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_match8_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm
matchlen_bsf_8_repeat_extend_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_match4_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm
JB repeat_extend_forward_end_encodeBlockAsm
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm
matchlen_match1_repeat_extend_encodeBlockAsm:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_repeat_encodeBlockAsm:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm
cant_repeat_two_offset_match_repeat_encodeBlockAsm:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_match_repeat_encodeBlockAsm
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_match_repeat_encodeBlockAsm
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_repeat_encodeBlockAsm
repeat_five_match_repeat_encodeBlockAsm:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_match_repeat_encodeBlockAsm:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_match_repeat_encodeBlockAsm:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_match_repeat_encodeBlockAsm:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_match_repeat_encodeBlockAsm:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_as_copy_encodeBlockAsm:
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeBlockAsm
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
four_bytes_remain_repeat_as_copy_encodeBlockAsm:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeBlockAsm
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
two_byte_offset_repeat_as_copy_encodeBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- MOVL SI, R8
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ MOVL DI, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
long_offset_short_repeat_as_copy_encodeBlockAsm:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
emit_copy_three_repeat_as_copy_encodeBlockAsm:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm
no_repeat_found_encodeBlockAsm:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm
candidate3_match_encodeBlockAsm:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm
candidate2_match_encodeBlockAsm:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm
match_extend_back_loop_encodeBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm
JMP match_extend_back_loop_encodeBlockAsm
match_extend_back_end_encodeBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeBlockAsm
- CMPL DI, $0x01000000
+ CMPL R8, $0x01000000
JB four_bytes_match_emit_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeBlockAsm
four_bytes_match_emit_encodeBlockAsm:
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBlockAsm
three_bytes_match_emit_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm
two_bytes_match_emit_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm
JMP memmove_long_match_emit_encodeBlockAsm
one_byte_match_emit_encodeBlockAsm:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm
memmove_long_match_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm:
match_nolit_loop_encodeBlockAsm:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm
matchlen_bsf_16match_nolit_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm
matchlen_match8_match_nolit_encodeBlockAsm:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm
matchlen_bsf_8_match_nolit_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm
matchlen_match4_match_nolit_encodeBlockAsm:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm
JB match_nolit_end_encodeBlockAsm
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm
matchlen_match1_match_nolit_encodeBlockAsm:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeBlockAsm
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeBlockAsm
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
repeat_five_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
four_bytes_remain_match_nolit_encodeBlockAsm:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeBlockAsm
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
two_byte_offset_match_nolit_encodeBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- MOVL BX, DI
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
long_offset_short_match_nolit_encodeBlockAsm:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
two_byte_offset_short_match_nolit_encodeBlockAsm:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
emit_copy_three_match_nolit_encodeBlockAsm:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm
emit_remainder_encodeBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm
@@ -1286,41 +1287,41 @@ emit_remainder_ok_encodeBlockAsm:
JB three_bytes_emit_remainder_encodeBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
four_bytes_emit_remainder_encodeBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
three_bytes_emit_remainder_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
two_bytes_emit_remainder_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm
JMP memmove_long_emit_remainder_encodeBlockAsm
one_byte_emit_remainder_encodeBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -1336,73 +1337,73 @@ memmove_emit_remainder_encodeBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm
memmove_long_emit_remainder_encodeBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -1416,1199 +1417,1200 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm4MB(dst []byte, src []byte) int
+// func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm4MB(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm4MB(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm4MB:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm4MB
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm4MB:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm4MB
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm4MB
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm4MB
repeat_extend_back_loop_encodeBlockAsm4MB:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm4MB
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm4MB
repeat_extend_back_end_encodeBlockAsm4MB:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 4(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 4(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm4MB:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
three_bytes_repeat_emit_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
two_bytes_repeat_emit_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm4MB
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
one_byte_repeat_emit_encodeBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
memmove_long_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm4MB:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm4MB
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB
matchlen_bsf_16repeat_extend_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match8_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm4MB
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB
matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match4_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm4MB
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
JB repeat_extend_forward_end_encodeBlockAsm4MB
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match1_repeat_extend_encodeBlockAsm4MB:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm4MB
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm4MB:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm4MB
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm4MB
cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm4MB
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_match_repeat_encodeBlockAsm4MB
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_match_repeat_encodeBlockAsm4MB:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_match_repeat_encodeBlockAsm4MB:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_match_repeat_encodeBlockAsm4MB:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_as_copy_encodeBlockAsm4MB:
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeBlockAsm4MB
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm4MB:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm4MB
no_repeat_found_encodeBlockAsm4MB:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm4MB
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm4MB
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm4MB
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm4MB
candidate3_match_encodeBlockAsm4MB:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm4MB
candidate2_match_encodeBlockAsm4MB:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm4MB:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm4MB
match_extend_back_loop_encodeBlockAsm4MB:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm4MB
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm4MB
JMP match_extend_back_loop_encodeBlockAsm4MB
match_extend_back_end_encodeBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 4(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 4(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm4MB:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm4MB
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm4MB
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeBlockAsm4MB
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBlockAsm4MB
three_bytes_match_emit_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm4MB
two_bytes_match_emit_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm4MB
JMP memmove_long_match_emit_encodeBlockAsm4MB
one_byte_match_emit_encodeBlockAsm4MB:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm4MB:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm4MB
memmove_long_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm4MB:
match_nolit_loop_encodeBlockAsm4MB:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm4MB
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB
matchlen_bsf_16match_nolit_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm4MB
matchlen_match8_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm4MB
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm4MB
matchlen_bsf_8_match_nolit_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm4MB
matchlen_match4_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm4MB
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm4MB
JB match_nolit_end_encodeBlockAsm4MB
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm4MB
matchlen_match1_match_nolit_encodeBlockAsm4MB:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm4MB
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm4MB:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeBlockAsm4MB
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeBlockAsm4MB
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
four_bytes_remain_match_nolit_encodeBlockAsm4MB:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
two_byte_offset_match_nolit_encodeBlockAsm4MB:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm4MB
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
-
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
+
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
long_offset_short_match_nolit_encodeBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
emit_copy_three_match_nolit_encodeBlockAsm4MB:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm4MB:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm4MB
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm4MB:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm4MB
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm4MB
emit_remainder_encodeBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 4(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 4(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm4MB
@@ -2618,33 +2620,33 @@ emit_remainder_ok_encodeBlockAsm4MB:
JB three_bytes_emit_remainder_encodeBlockAsm4MB
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
three_bytes_emit_remainder_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
two_bytes_emit_remainder_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm4MB
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
one_byte_emit_remainder_encodeBlockAsm4MB:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -2660,73 +2662,73 @@ memmove_emit_remainder_encodeBlockAsm4MB:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB
memmove_long_emit_remainder_encodeBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
MOVOU (SI), X4
@@ -2740,967 +2742,968 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm4MB:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm12B(dst []byte, src []byte) int
+// func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm12B(SB), $16408-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000080, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000080, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x18, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x18, R11
+ IMULQ R9, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x18, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm12B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm12B
repeat_extend_back_loop_encodeBlockAsm12B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm12B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm12B
repeat_extend_back_end_encodeBlockAsm12B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm12B
JB three_bytes_repeat_emit_encodeBlockAsm12B
three_bytes_repeat_emit_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm12B
two_bytes_repeat_emit_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm12B
JMP memmove_long_repeat_emit_encodeBlockAsm12B
one_byte_repeat_emit_encodeBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
memmove_long_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm12B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm12B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B
matchlen_bsf_16repeat_extend_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match8_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm12B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm12B
matchlen_bsf_8_repeat_extend_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match4_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm12B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm12B
JB repeat_extend_forward_end_encodeBlockAsm12B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match1_repeat_extend_encodeBlockAsm12B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm12B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm12B
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm12B
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm12B
cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm12B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_match_repeat_encodeBlockAsm12B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_match_repeat_encodeBlockAsm12B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_match_repeat_encodeBlockAsm12B:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_as_copy_encodeBlockAsm12B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
long_offset_short_repeat_as_copy_encodeBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm12B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm12B
no_repeat_found_encodeBlockAsm12B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm12B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm12B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm12B
candidate3_match_encodeBlockAsm12B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm12B
candidate2_match_encodeBlockAsm12B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm12B
match_extend_back_loop_encodeBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm12B
JMP match_extend_back_loop_encodeBlockAsm12B
match_extend_back_end_encodeBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm12B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm12B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm12B
JB three_bytes_match_emit_encodeBlockAsm12B
three_bytes_match_emit_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm12B
two_bytes_match_emit_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm12B
JMP memmove_long_match_emit_encodeBlockAsm12B
one_byte_match_emit_encodeBlockAsm12B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm12B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm12B
memmove_long_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm12B:
match_nolit_loop_encodeBlockAsm12B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm12B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B
matchlen_bsf_16match_nolit_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm12B
matchlen_match8_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm12B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm12B
matchlen_bsf_8_match_nolit_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm12B
matchlen_match4_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm12B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm12B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm12B
JB match_nolit_end_encodeBlockAsm12B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm12B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm12B
matchlen_match1_match_nolit_encodeBlockAsm12B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm12B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm12B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm12B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
long_offset_short_match_nolit_encodeBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
- JMP match_nolit_emitcopy_end_encodeBlockAsm12B
-
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
two_byte_offset_short_match_nolit_encodeBlockAsm12B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm12B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
emit_copy_three_match_nolit_encodeBlockAsm12B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm12B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm12B:
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x18, DI
- IMULQ R8, DI
- SHRQ $0x34, DI
- SHLQ $0x18, BX
- IMULQ R8, BX
- SHRQ $0x34, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x18, R8
+ IMULQ R9, R8
+ SHRQ $0x34, R8
+ SHLQ $0x18, SI
+ IMULQ R9, SI
+ SHRQ $0x34, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm12B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm12B
emit_remainder_encodeBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm12B
@@ -3709,26 +3712,26 @@ emit_remainder_ok_encodeBlockAsm12B:
JB three_bytes_emit_remainder_encodeBlockAsm12B
three_bytes_emit_remainder_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm12B
two_bytes_emit_remainder_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm12B
JMP memmove_long_emit_remainder_encodeBlockAsm12B
one_byte_emit_remainder_encodeBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -3744,73 +3747,73 @@ memmove_emit_remainder_encodeBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm12B
memmove_long_emit_remainder_encodeBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -3824,967 +3827,968 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm10B(dst []byte, src []byte) int
+// func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm10B(SB), $4120-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000020, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000020, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm10B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm10B
repeat_extend_back_loop_encodeBlockAsm10B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm10B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm10B
repeat_extend_back_end_encodeBlockAsm10B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm10B
JB three_bytes_repeat_emit_encodeBlockAsm10B
three_bytes_repeat_emit_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm10B
two_bytes_repeat_emit_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm10B
JMP memmove_long_repeat_emit_encodeBlockAsm10B
one_byte_repeat_emit_encodeBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
memmove_long_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm10B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm10B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B
matchlen_bsf_16repeat_extend_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match8_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm10B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm10B
matchlen_bsf_8_repeat_extend_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match4_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm10B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm10B
JB repeat_extend_forward_end_encodeBlockAsm10B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match1_repeat_extend_encodeBlockAsm10B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm10B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm10B
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm10B
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm10B
cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm10B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_match_repeat_encodeBlockAsm10B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_match_repeat_encodeBlockAsm10B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_match_repeat_encodeBlockAsm10B:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_as_copy_encodeBlockAsm10B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
long_offset_short_repeat_as_copy_encodeBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm10B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm10B
no_repeat_found_encodeBlockAsm10B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm10B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm10B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm10B
candidate3_match_encodeBlockAsm10B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm10B
candidate2_match_encodeBlockAsm10B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm10B
match_extend_back_loop_encodeBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm10B
JMP match_extend_back_loop_encodeBlockAsm10B
match_extend_back_end_encodeBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm10B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm10B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm10B
JB three_bytes_match_emit_encodeBlockAsm10B
three_bytes_match_emit_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm10B
two_bytes_match_emit_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm10B
JMP memmove_long_match_emit_encodeBlockAsm10B
one_byte_match_emit_encodeBlockAsm10B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm10B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm10B
memmove_long_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm10B:
match_nolit_loop_encodeBlockAsm10B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm10B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B
matchlen_bsf_16match_nolit_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm10B
matchlen_match8_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm10B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm10B
matchlen_bsf_8_match_nolit_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm10B
matchlen_match4_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm10B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm10B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm10B
JB match_nolit_end_encodeBlockAsm10B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm10B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm10B
matchlen_match1_match_nolit_encodeBlockAsm10B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm10B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm10B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm10B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
long_offset_short_match_nolit_encodeBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
two_byte_offset_short_match_nolit_encodeBlockAsm10B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm10B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
emit_copy_three_match_nolit_encodeBlockAsm10B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm10B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm10B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x36, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x36, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x36, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x36, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm10B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm10B
emit_remainder_encodeBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm10B
@@ -4793,26 +4797,26 @@ emit_remainder_ok_encodeBlockAsm10B:
JB three_bytes_emit_remainder_encodeBlockAsm10B
three_bytes_emit_remainder_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm10B
two_bytes_emit_remainder_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm10B
JMP memmove_long_emit_remainder_encodeBlockAsm10B
one_byte_emit_remainder_encodeBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -4828,73 +4832,73 @@ memmove_emit_remainder_encodeBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm10B
memmove_long_emit_remainder_encodeBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -4908,943 +4912,944 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm8B(dst []byte, src []byte) int
+// func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm8B(SB), $1048-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000008, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000008, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
- JNE no_repeat_found_encodeBlockAsm8B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x38, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
+ JNE no_repeat_found_encodeBlockAsm8B
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm8B
repeat_extend_back_loop_encodeBlockAsm8B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm8B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm8B
repeat_extend_back_end_encodeBlockAsm8B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm8B
JB three_bytes_repeat_emit_encodeBlockAsm8B
three_bytes_repeat_emit_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm8B
two_bytes_repeat_emit_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm8B
JMP memmove_long_repeat_emit_encodeBlockAsm8B
one_byte_repeat_emit_encodeBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
memmove_long_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm8B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm8B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B
matchlen_bsf_16repeat_extend_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match8_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm8B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm8B
matchlen_bsf_8_repeat_extend_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match4_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm8B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm8B
JB repeat_extend_forward_end_encodeBlockAsm8B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match1_repeat_extend_encodeBlockAsm8B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm8B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm8B
// emitRepeat
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm8B
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm8B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_match_repeat_encodeBlockAsm8B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_match_repeat_encodeBlockAsm8B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_as_copy_encodeBlockAsm8B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
long_offset_short_repeat_as_copy_encodeBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm8B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm8B
no_repeat_found_encodeBlockAsm8B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm8B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm8B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm8B
candidate3_match_encodeBlockAsm8B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm8B
candidate2_match_encodeBlockAsm8B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm8B
match_extend_back_loop_encodeBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm8B
JMP match_extend_back_loop_encodeBlockAsm8B
match_extend_back_end_encodeBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm8B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm8B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm8B
JB three_bytes_match_emit_encodeBlockAsm8B
three_bytes_match_emit_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm8B
two_bytes_match_emit_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm8B
JMP memmove_long_match_emit_encodeBlockAsm8B
one_byte_match_emit_encodeBlockAsm8B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm8B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm8B
memmove_long_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
- ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm8B:
match_nolit_loop_encodeBlockAsm8B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm8B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B
matchlen_bsf_16match_nolit_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm8B
matchlen_match8_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm8B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm8B
matchlen_bsf_8_match_nolit_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm8B
matchlen_match4_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm8B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm8B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm8B
JB match_nolit_end_encodeBlockAsm8B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm8B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm8B
matchlen_match1_match_nolit_encodeBlockAsm8B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm8B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm8B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm8B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- MOVL R9, BX
- LEAL -4(R9), R9
- CMPL BX, $0x08
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
long_offset_short_match_nolit_encodeBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, BX
- LEAL -4(R9), R9
- CMPL BX, $0x08
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
two_byte_offset_short_match_nolit_encodeBlockAsm8B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm8B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
emit_copy_three_match_nolit_encodeBlockAsm8B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm8B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm8B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x38, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x38, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x38, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x38, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm8B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm8B
emit_remainder_encodeBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm8B
@@ -5853,26 +5858,26 @@ emit_remainder_ok_encodeBlockAsm8B:
JB three_bytes_emit_remainder_encodeBlockAsm8B
three_bytes_emit_remainder_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm8B
two_bytes_emit_remainder_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm8B
JMP memmove_long_emit_remainder_encodeBlockAsm8B
one_byte_emit_remainder_encodeBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -5888,73 +5893,73 @@ memmove_emit_remainder_encodeBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
memmove_long_emit_remainder_encodeBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -5968,961 +5973,962 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm(dst []byte, src []byte) int
+// func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeBetterBlockAsm
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeBetterBlockAsm
check_maxskip_ok_encodeBetterBlockAsm:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeBetterBlockAsm:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm
no_short_found_encodeBetterBlockAsm:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm
candidateS_match_encodeBetterBlockAsm:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm
match_extend_back_loop_encodeBetterBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm
JMP match_extend_back_loop_encodeBetterBlockAsm
match_extend_back_end_encodeBetterBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm
matchlen_bsf_16match_nolit_encodeBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm
matchlen_match8_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm
matchlen_match4_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm
JB match_nolit_end_encodeBetterBlockAsm
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm
matchlen_match1_match_nolit_encodeBetterBlockAsm:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeBetterBlockAsm
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeBetterBlockAsm
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeBetterBlockAsm
match_length_ok_encodeBetterBlockAsm:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
four_bytes_match_emit_encodeBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
three_bytes_match_emit_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
two_bytes_match_emit_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm
JMP memmove_long_match_emit_encodeBetterBlockAsm
one_byte_match_emit_encodeBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm
memmove_long_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeBetterBlockAsm
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
four_bytes_remain_match_nolit_encodeBetterBlockAsm:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
two_byte_offset_match_nolit_encodeBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- MOVL DI, R8
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ MOVL R8, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
long_offset_short_match_nolit_encodeBetterBlockAsm:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
emit_copy_three_match_nolit_encodeBetterBlockAsm:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
match_is_repeat_encodeBetterBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
four_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
three_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
two_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
one_byte_match_emit_repeat_encodeBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
memmove_long_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm
- CMPQ AX, (SP)
- JB match_nolit_dst_ok_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
- RET
-
-match_nolit_dst_ok_encodeBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ CMPQ CX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm
emit_remainder_encodeBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm
@@ -6932,41 +6938,41 @@ emit_remainder_ok_encodeBetterBlockAsm:
JB three_bytes_emit_remainder_encodeBetterBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
four_bytes_emit_remainder_encodeBetterBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
three_bytes_emit_remainder_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
two_bytes_emit_remainder_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
one_byte_emit_remainder_encodeBetterBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -6982,73 +6988,73 @@ memmove_emit_remainder_encodeBetterBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
memmove_long_emit_remainder_encodeBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -7062,903 +7068,904 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+// func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm4MB(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm4MB:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm4MB
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm4MB:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeBetterBlockAsm4MB
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeBetterBlockAsm4MB
check_maxskip_ok_encodeBetterBlockAsm4MB:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeBetterBlockAsm4MB:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm4MB
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm4MB
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm4MB
no_short_found_encodeBetterBlockAsm4MB:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm4MB
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm4MB
candidateS_match_encodeBetterBlockAsm4MB:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm4MB:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm4MB
match_extend_back_loop_encodeBetterBlockAsm4MB:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm4MB
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm4MB
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm4MB
JMP match_extend_back_loop_encodeBetterBlockAsm4MB
match_extend_back_end_encodeBetterBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 4(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 4(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm4MB:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB
matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match8_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
JB match_nolit_end_encodeBetterBlockAsm4MB
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm4MB
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm4MB:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm4MB
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeBetterBlockAsm4MB
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeBetterBlockAsm4MB
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeBetterBlockAsm4MB
match_length_ok_encodeBetterBlockAsm4MB:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeBetterBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
three_bytes_match_emit_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
two_bytes_match_emit_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
one_byte_match_emit_encodeBetterBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
memmove_long_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
match_is_repeat_encodeBetterBlockAsm4MB:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm4MB
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm4MB:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm4MB:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm4MB
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm4MB
emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 4(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 4(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
@@ -7968,33 +7975,33 @@ emit_remainder_ok_encodeBetterBlockAsm4MB:
JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm4MB
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
one_byte_emit_remainder_encodeBetterBlockAsm4MB:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -8010,73 +8017,73 @@ memmove_emit_remainder_encodeBetterBlockAsm4MB:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
MOVOU (SI), X4
@@ -8090,756 +8097,757 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000280, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000280, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 65560(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 65560(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 65536(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 65536(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm12B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm12B
no_short_found_encodeBetterBlockAsm12B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm12B
candidateS_match_encodeBetterBlockAsm12B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm12B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm12B
match_extend_back_loop_encodeBetterBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm12B
JMP match_extend_back_loop_encodeBetterBlockAsm12B
match_extend_back_end_encodeBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm12B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_match8_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
JB match_nolit_end_encodeBetterBlockAsm12B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm12B
matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm12B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm12B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm12B
JB three_bytes_match_emit_encodeBetterBlockAsm12B
three_bytes_match_emit_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
two_bytes_match_emit_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
one_byte_match_emit_encodeBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
memmove_long_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
long_offset_short_match_nolit_encodeBetterBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
match_is_repeat_encodeBetterBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B
three_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm12B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x34, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x34, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 65560(SP)(R10*4)
- MOVL R13, 65560(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x34, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 65536(AX)(R11*4)
+ MOVL R14, 65536(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm12B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm12B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x32, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm12B
emit_remainder_encodeBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm12B
@@ -8848,26 +8856,26 @@ emit_remainder_ok_encodeBetterBlockAsm12B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm12B
three_bytes_emit_remainder_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
two_bytes_emit_remainder_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm12B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
one_byte_emit_remainder_encodeBetterBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -8883,73 +8891,73 @@ memmove_emit_remainder_encodeBetterBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
memmove_long_emit_remainder_encodeBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -8963,756 +8971,757 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
- JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x000000a0, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x000000a0, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 16408(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 16408(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 16384(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 16384(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm10B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm10B
no_short_found_encodeBetterBlockAsm10B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm10B
candidateS_match_encodeBetterBlockAsm10B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm10B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm10B
match_extend_back_loop_encodeBetterBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm10B
JMP match_extend_back_loop_encodeBetterBlockAsm10B
match_extend_back_end_encodeBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm10B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_match8_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
JB match_nolit_end_encodeBetterBlockAsm10B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm10B
matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm10B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm10B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm10B
JB three_bytes_match_emit_encodeBetterBlockAsm10B
three_bytes_match_emit_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
two_bytes_match_emit_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
one_byte_match_emit_encodeBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
memmove_long_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
long_offset_short_match_nolit_encodeBetterBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
match_is_repeat_encodeBetterBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B
three_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm10B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x36, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x36, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 16408(SP)(R10*4)
- MOVL R13, 16408(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x36, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 16384(AX)(R11*4)
+ MOVL R14, 16384(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm10B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm10B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x34, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm10B
emit_remainder_encodeBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm10B
@@ -9721,26 +9730,26 @@ emit_remainder_ok_encodeBetterBlockAsm10B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm10B
three_bytes_emit_remainder_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
two_bytes_emit_remainder_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm10B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
one_byte_emit_remainder_encodeBetterBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -9756,73 +9765,73 @@ memmove_emit_remainder_encodeBetterBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
memmove_long_emit_remainder_encodeBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -9836,742 +9845,743 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000028, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000028, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 4120(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 4120(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 4096(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 4096(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm8B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm8B
no_short_found_encodeBetterBlockAsm8B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm8B
candidateS_match_encodeBetterBlockAsm8B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm8B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm8B
match_extend_back_loop_encodeBetterBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm8B
JMP match_extend_back_loop_encodeBetterBlockAsm8B
match_extend_back_end_encodeBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm8B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_match8_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
JB match_nolit_end_encodeBetterBlockAsm8B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm8B
matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm8B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm8B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm8B
JB three_bytes_match_emit_encodeBetterBlockAsm8B
three_bytes_match_emit_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
two_bytes_match_emit_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
one_byte_match_emit_encodeBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
memmove_long_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
long_offset_short_match_nolit_encodeBetterBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
-repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
- JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
match_is_repeat_encodeBetterBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B
three_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x04
+ CMPQ R8, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R8), R9
- MOVL R9, (AX)
+ MOVL (R9), R10
+ MOVL R10, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R8), R9
- MOVL -4(R8)(DI*1), R8
- MOVL R9, (AX)
- MOVL R8, -4(AX)(DI*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (CX)
+ MOVL R9, -4(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R12
- SUBQ R9, R12
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R11
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R12*1), R9
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
ADDQ $0x20, R13
- ADDQ $0x20, R9
- ADDQ $0x20, R12
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R12*1), X4
- MOVOU -16(R8)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ DI, R12
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm8B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x38, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x38, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 4120(SP)(R10*4)
- MOVL R13, 4120(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x38, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 4096(AX)(R11*4)
+ MOVL R14, 4096(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm8B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm8B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x36, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm8B
emit_remainder_encodeBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm8B
@@ -10580,26 +10590,26 @@ emit_remainder_ok_encodeBetterBlockAsm8B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm8B
three_bytes_emit_remainder_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
two_bytes_emit_remainder_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm8B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
one_byte_emit_remainder_encodeBetterBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -10615,73 +10625,73 @@ memmove_emit_remainder_encodeBetterBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
memmove_long_emit_remainder_encodeBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -10695,798 +10705,799 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm
repeat_extend_back_loop_encodeSnappyBlockAsm:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm
repeat_extend_back_end_encodeSnappyBlockAsm:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
four_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVL BX, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
three_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
two_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
one_byte_repeat_emit_encodeSnappyBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm
memmove_long_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match8_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
JB repeat_extend_forward_end_encodeSnappyBlockAsm
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeSnappyBlockAsm
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeSnappyBlockAsm
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm
no_repeat_found_encodeSnappyBlockAsm:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm
candidate3_match_encodeSnappyBlockAsm:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm
candidate2_match_encodeSnappyBlockAsm:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm
match_extend_back_loop_encodeSnappyBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm
JMP match_extend_back_loop_encodeSnappyBlockAsm
match_extend_back_end_encodeSnappyBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x01000000
+ CMPL R8, $0x01000000
JB four_bytes_match_emit_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
four_bytes_match_emit_encodeSnappyBlockAsm:
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
three_bytes_match_emit_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
two_bytes_match_emit_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm
JMP memmove_long_match_emit_encodeSnappyBlockAsm
one_byte_match_emit_encodeSnappyBlockAsm:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm
memmove_long_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm:
match_nolit_loop_encodeSnappyBlockAsm:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_match8_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_match4_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
JB match_nolit_end_encodeSnappyBlockAsm
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm
matchlen_match1_match_nolit_encodeSnappyBlockAsm:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeSnappyBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
two_byte_offset_match_nolit_encodeSnappyBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
emit_copy_three_match_nolit_encodeSnappyBlockAsm:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm
emit_remainder_encodeSnappyBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm
@@ -11496,41 +11507,41 @@ emit_remainder_ok_encodeSnappyBlockAsm:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
four_bytes_emit_remainder_encodeSnappyBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
three_bytes_emit_remainder_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
two_bytes_emit_remainder_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
one_byte_emit_remainder_encodeSnappyBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -11546,73 +11557,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm
memmove_long_emit_remainder_encodeSnappyBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -11626,718 +11637,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm64K(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm64K:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm64K
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm64K:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm64K
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
repeat_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
repeat_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm64K:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm64K
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K
JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K
three_bytes_repeat_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm64K
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
one_byte_repeat_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm64K:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm64K
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm64K:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm64K
no_repeat_found_encodeSnappyBlockAsm64K:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm64K
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm64K
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm64K
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm64K
candidate3_match_encodeSnappyBlockAsm64K:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm64K
candidate2_match_encodeSnappyBlockAsm64K:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm64K:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm64K
match_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm64K
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBlockAsm64K
match_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm64K
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm64K
JB three_bytes_match_emit_encodeSnappyBlockAsm64K
three_bytes_match_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
two_bytes_match_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
one_byte_match_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
memmove_long_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
match_nolit_loop_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match8_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
JB match_nolit_end_encodeSnappyBlockAsm64K
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm64K
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm64K:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm64K:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm64K
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm64K
emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm64K:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm64K
@@ -12346,26 +12358,26 @@ emit_remainder_ok_encodeSnappyBlockAsm64K:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K
three_bytes_emit_remainder_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
two_bytes_emit_remainder_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm64K
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
one_byte_emit_remainder_encodeSnappyBlockAsm64K:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -12381,73 +12393,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm64K:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
MOVOU (SI), X4
@@ -12461,718 +12473,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000080, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000080, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x18, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x18, R11
+ IMULQ R9, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x18, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm12B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
repeat_extend_back_loop_encodeSnappyBlockAsm12B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
repeat_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm12B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
one_byte_repeat_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm12B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm12B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm12B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm12B
no_repeat_found_encodeSnappyBlockAsm12B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm12B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm12B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm12B
candidate3_match_encodeSnappyBlockAsm12B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm12B
candidate2_match_encodeSnappyBlockAsm12B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm12B
match_extend_back_loop_encodeSnappyBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBlockAsm12B
match_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm12B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm12B
JB three_bytes_match_emit_encodeSnappyBlockAsm12B
three_bytes_match_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
two_bytes_match_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
one_byte_match_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
memmove_long_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
match_nolit_loop_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match8_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
JB match_nolit_end_encodeSnappyBlockAsm12B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm12B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm12B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm12B:
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x18, DI
- IMULQ R8, DI
- SHRQ $0x34, DI
- SHLQ $0x18, BX
- IMULQ R8, BX
- SHRQ $0x34, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x18, R8
+ IMULQ R9, R8
+ SHRQ $0x34, R8
+ SHLQ $0x18, SI
+ IMULQ R9, SI
+ SHRQ $0x34, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm12B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm12B
emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
@@ -13181,26 +13194,26 @@ emit_remainder_ok_encodeSnappyBlockAsm12B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm12B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
one_byte_emit_remainder_encodeSnappyBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -13216,73 +13229,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -13296,718 +13309,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000020, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000020, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm10B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
repeat_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
repeat_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm10B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
one_byte_repeat_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm10B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm10B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm10B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm10B
no_repeat_found_encodeSnappyBlockAsm10B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm10B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm10B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm10B
candidate3_match_encodeSnappyBlockAsm10B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm10B
candidate2_match_encodeSnappyBlockAsm10B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm10B
match_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBlockAsm10B
match_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm10B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm10B
JB three_bytes_match_emit_encodeSnappyBlockAsm10B
three_bytes_match_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
two_bytes_match_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
one_byte_match_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
memmove_long_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
match_nolit_loop_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match8_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
JB match_nolit_end_encodeSnappyBlockAsm10B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm10B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm10B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm10B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x36, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x36, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x36, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x36, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm10B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm10B
emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
@@ -14016,26 +14030,26 @@ emit_remainder_ok_encodeSnappyBlockAsm10B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm10B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
one_byte_emit_remainder_encodeSnappyBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -14051,73 +14065,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -14131,714 +14145,715 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000008, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000008, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x38, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm8B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
repeat_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
repeat_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm8B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
one_byte_repeat_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm8B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm8B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm8B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm8B
no_repeat_found_encodeSnappyBlockAsm8B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm8B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm8B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm8B
candidate3_match_encodeSnappyBlockAsm8B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm8B
candidate2_match_encodeSnappyBlockAsm8B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm8B
match_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBlockAsm8B
match_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm8B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm8B
JB three_bytes_match_emit_encodeSnappyBlockAsm8B
three_bytes_match_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
two_bytes_match_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
one_byte_match_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
memmove_long_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
match_nolit_loop_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match8_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
JB match_nolit_end_encodeSnappyBlockAsm8B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm8B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm8B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm8B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x38, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x38, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x38, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x38, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm8B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm8B
emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm8B
@@ -14847,26 +14862,26 @@ emit_remainder_ok_encodeSnappyBlockAsm8B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B
three_bytes_emit_remainder_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
two_bytes_emit_remainder_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm8B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
one_byte_emit_remainder_encodeSnappyBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -14882,73 +14897,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
memmove_long_emit_remainder_encodeSnappyBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -14962,520 +14977,521 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeSnappyBetterBlockAsm
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
check_maxskip_ok_encodeSnappyBetterBlockAsm:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeSnappyBetterBlockAsm:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm
no_short_found_encodeSnappyBetterBlockAsm:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm
candidateS_match_encodeSnappyBetterBlockAsm:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
match_extend_back_loop_encodeSnappyBetterBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
match_extend_back_end_encodeSnappyBetterBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
JB match_nolit_end_encodeSnappyBetterBlockAsm
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeSnappyBetterBlockAsm
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeSnappyBetterBlockAsm
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeSnappyBetterBlockAsm
match_length_ok_encodeSnappyBetterBlockAsm:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_encodeSnappyBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
four_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
three_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
two_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
one_byte_match_emit_encodeSnappyBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
memmove_long_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm
emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm
@@ -15485,41 +15501,41 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
four_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
one_byte_emit_remainder_encodeSnappyBetterBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -15535,73 +15551,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -15615,463 +15631,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_ba
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000a00, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm64K(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000900, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm64K:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm64K
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm64K:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x30, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 262168(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 262168(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x30, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x33, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 262144(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 262144(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm64K
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm64K
no_short_found_encodeSnappyBetterBlockAsm64K:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm64K
candidateS_match_encodeSnappyBetterBlockAsm64K:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x30, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x30, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm64K:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
match_extend_back_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm64K:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
JB match_nolit_end_encodeSnappyBetterBlockAsm64K
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K
three_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
-
-emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm64K
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x30, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x30, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 262168(SP)(R10*4)
- MOVL R13, 262168(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x30, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x33, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x30, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x33, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 262144(AX)(R11*4)
+ MOVL R14, 262144(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm64K:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x30, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x30, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x30, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm64K
emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K
@@ -16080,26 +16097,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -16115,73 +16132,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
MOVOU (SI), X4
@@ -16195,463 +16212,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000280, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000280, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 65560(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 65560(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 65536(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 65536(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm12B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm12B
no_short_found_encodeSnappyBetterBlockAsm12B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm12B
candidateS_match_encodeSnappyBetterBlockAsm12B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
match_extend_back_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm12B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
JB match_nolit_end_encodeSnappyBetterBlockAsm12B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B
three_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm12B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x34, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x34, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 65560(SP)(R10*4)
- MOVL R13, 65560(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x34, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 65536(AX)(R11*4)
+ MOVL R14, 65536(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm12B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x32, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm12B
emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B
@@ -16660,26 +16678,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -16695,73 +16713,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
- JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
-
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -16775,463 +16793,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x000000a0, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x000000a0, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 16408(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 16408(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 16384(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 16384(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm10B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm10B
no_short_found_encodeSnappyBetterBlockAsm10B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm10B
candidateS_match_encodeSnappyBetterBlockAsm10B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
match_extend_back_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm10B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
JB match_nolit_end_encodeSnappyBetterBlockAsm10B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B
three_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm10B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x36, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x36, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 16408(SP)(R10*4)
- MOVL R13, 16408(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x36, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 16384(AX)(R11*4)
+ MOVL R14, 16384(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm10B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x34, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm10B
emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B
@@ -17240,26 +17259,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -17275,73 +17294,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -17355,461 +17374,462 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000028, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000028, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 4120(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 4120(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 4096(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 4096(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm8B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm8B
no_short_found_encodeSnappyBetterBlockAsm8B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm8B
candidateS_match_encodeSnappyBetterBlockAsm8B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
match_extend_back_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm8B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
- JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
JB match_nolit_end_encodeSnappyBetterBlockAsm8B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B
three_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm8B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x38, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x38, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 4120(SP)(R10*4)
- MOVL R13, 4120(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x38, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 4096(AX)(R11*4)
+ MOVL R14, 4096(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm8B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x36, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm8B
emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B
@@ -17818,26 +17838,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -17853,73 +17873,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -17933,1136 +17953,1142 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func calcBlockSize(src []byte) int
+// func calcBlockSize(src []byte, tmp *[32768]byte) int
// Requires: BMI, SSE2
-TEXT ·calcBlockSize(SB), $32792-32
- XORQ AX, AX
- MOVQ $0x00000100, CX
- LEAQ 24(SP), DX
+TEXT ·calcBlockSize(SB), $24-40
+ MOVQ tmp+24(FP), AX
+ XORQ CX, CX
+ MOVQ $0x00000100, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_calcBlockSize:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_calcBlockSize
MOVL $0x00000000, 12(SP)
- MOVQ src_len+8(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+0(FP), BX
search_loop_calcBlockSize:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_calcBlockSize
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x33, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x33, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x33, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x33, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x33, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_calcBlockSize
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_calcBlockSize
repeat_extend_back_loop_calcBlockSize:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_calcBlockSize
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_calcBlockSize
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_calcBlockSize
repeat_extend_back_end_calcBlockSize:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
repeat_dst_size_check_calcBlockSize:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_calcBlockSize
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_calcBlockSize
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_calcBlockSize
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_calcBlockSize
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_calcBlockSize
four_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_calcBlockSize
three_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_calcBlockSize
two_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_calcBlockSize
JMP memmove_long_repeat_emit_calcBlockSize
one_byte_repeat_emit_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_repeat_emit_calcBlockSize:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
JMP emit_literal_done_repeat_emit_calcBlockSize
memmove_long_repeat_emit_calcBlockSize:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
emit_literal_done_repeat_emit_calcBlockSize:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+8(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+8(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_calcBlockSize:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_calcBlockSize
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_calcBlockSize
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_calcBlockSize
matchlen_bsf_16repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_calcBlockSize
matchlen_match8_repeat_extend_calcBlockSize:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_calcBlockSize
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_calcBlockSize
matchlen_bsf_8_repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_calcBlockSize
matchlen_match4_repeat_extend_calcBlockSize:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_calcBlockSize
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_calcBlockSize
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_calcBlockSize:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_calcBlockSize
JB repeat_extend_forward_end_calcBlockSize
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_calcBlockSize
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_calcBlockSize
matchlen_match1_repeat_extend_calcBlockSize:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_calcBlockSize
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_calcBlockSize:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_calcBlockSize
four_bytes_loop_back_repeat_as_copy_calcBlockSize:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_calcBlockSize
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_calcBlockSize
JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize
four_bytes_remain_repeat_as_copy_calcBlockSize:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_calcBlockSize
- XORL BX, BX
- ADDQ $0x05, AX
+ XORL SI, SI
+ ADDQ $0x05, CX
JMP repeat_end_emit_calcBlockSize
two_byte_offset_repeat_as_copy_calcBlockSize:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_calcBlockSize
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_calcBlockSize
two_byte_offset_short_repeat_as_copy_calcBlockSize:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_calcBlockSize
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_calcBlockSize
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP repeat_end_emit_calcBlockSize
emit_copy_three_repeat_as_copy_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
repeat_end_emit_calcBlockSize:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_calcBlockSize
no_repeat_found_calcBlockSize:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_calcBlockSize
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_calcBlockSize
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_calcBlockSize
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_calcBlockSize
candidate3_match_calcBlockSize:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_calcBlockSize
candidate2_match_calcBlockSize:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_calcBlockSize:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_calcBlockSize
match_extend_back_loop_calcBlockSize:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_calcBlockSize
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_calcBlockSize
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_calcBlockSize
JMP match_extend_back_loop_calcBlockSize
match_extend_back_end_calcBlockSize:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_dst_size_check_calcBlockSize:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_calcBlockSize
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), DI
+ CMPL DI, $0x3c
JB one_byte_match_emit_calcBlockSize
- CMPL SI, $0x00000100
+ CMPL DI, $0x00000100
JB two_bytes_match_emit_calcBlockSize
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB three_bytes_match_emit_calcBlockSize
- CMPL SI, $0x01000000
+ CMPL DI, $0x01000000
JB four_bytes_match_emit_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_calcBlockSize
four_bytes_match_emit_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_calcBlockSize
three_bytes_match_emit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_calcBlockSize
two_bytes_match_emit_calcBlockSize:
- ADDQ $0x02, AX
- CMPL SI, $0x40
+ ADDQ $0x02, CX
+ CMPL DI, $0x40
JB memmove_match_emit_calcBlockSize
JMP memmove_long_match_emit_calcBlockSize
one_byte_match_emit_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_match_emit_calcBlockSize:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
JMP emit_literal_done_match_emit_calcBlockSize
memmove_long_match_emit_calcBlockSize:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
emit_literal_done_match_emit_calcBlockSize:
match_nolit_loop_calcBlockSize:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+8(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+8(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_calcBlockSize:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_calcBlockSize
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSize
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_calcBlockSize
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_calcBlockSize
matchlen_bsf_16match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_calcBlockSize
matchlen_match8_match_nolit_calcBlockSize:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_calcBlockSize
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSize
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_calcBlockSize
matchlen_bsf_8_match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_calcBlockSize
matchlen_match4_match_nolit_calcBlockSize:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_calcBlockSize
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_calcBlockSize
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_calcBlockSize:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_calcBlockSize
JB match_nolit_end_calcBlockSize
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_calcBlockSize
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_calcBlockSize
matchlen_match1_match_nolit_calcBlockSize:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_calcBlockSize
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_calcBlockSize:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_calcBlockSize
four_bytes_loop_back_match_nolit_calcBlockSize:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_calcBlockSize
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_calcBlockSize
JMP four_bytes_loop_back_match_nolit_calcBlockSize
four_bytes_remain_match_nolit_calcBlockSize:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_calcBlockSize
- XORL BX, BX
- ADDQ $0x05, AX
+ XORL SI, SI
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_calcBlockSize
two_byte_offset_match_nolit_calcBlockSize:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_calcBlockSize
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_calcBlockSize
two_byte_offset_short_match_nolit_calcBlockSize:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_calcBlockSize
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_calcBlockSize
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_calcBlockSize
emit_copy_three_match_nolit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_calcBlockSize:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_calcBlockSize
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_nolit_dst_ok_calcBlockSize:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x33, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x33, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x33, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x33, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_calcBlockSize
- INCL CX
+ INCL DX
JMP search_loop_calcBlockSize
emit_remainder_calcBlockSize:
- MOVQ src_len+8(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+8(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
emit_remainder_ok_calcBlockSize:
- MOVQ src_len+8(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+8(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_calcBlockSize
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
- LEAL -1(SI), CX
- CMPL CX, $0x3c
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), AX
+ CMPL AX, $0x3c
JB one_byte_emit_remainder_calcBlockSize
- CMPL CX, $0x00000100
+ CMPL AX, $0x00000100
JB two_bytes_emit_remainder_calcBlockSize
- CMPL CX, $0x00010000
+ CMPL AX, $0x00010000
JB three_bytes_emit_remainder_calcBlockSize
- CMPL CX, $0x01000000
+ CMPL AX, $0x01000000
JB four_bytes_emit_remainder_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_calcBlockSize
four_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_calcBlockSize
three_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_calcBlockSize
two_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x02, AX
- CMPL CX, $0x40
+ ADDQ $0x02, CX
+ CMPL AX, $0x40
JB memmove_emit_remainder_calcBlockSize
JMP memmove_long_emit_remainder_calcBlockSize
one_byte_emit_remainder_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_emit_remainder_calcBlockSize:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
JMP emit_literal_done_emit_remainder_calcBlockSize
memmove_long_emit_remainder_calcBlockSize:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
emit_literal_done_emit_remainder_calcBlockSize:
- MOVQ AX, ret+24(FP)
+ MOVQ CX, ret+32(FP)
RET
-// func calcBlockSizeSmall(src []byte) int
+// func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int
// Requires: BMI, SSE2
-TEXT ·calcBlockSizeSmall(SB), $2072-32
- XORQ AX, AX
- MOVQ $0x00000010, CX
- LEAQ 24(SP), DX
+TEXT ·calcBlockSizeSmall(SB), $24-40
+ MOVQ tmp+24(FP), AX
+ XORQ CX, CX
+ MOVQ $0x00000010, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_calcBlockSizeSmall:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_calcBlockSizeSmall
MOVL $0x00000000, 12(SP)
- MOVQ src_len+8(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+0(FP), BX
search_loop_calcBlockSizeSmall:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_calcBlockSizeSmall
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x37, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x37, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x37, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x37, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x37, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_calcBlockSizeSmall
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_calcBlockSizeSmall
repeat_extend_back_loop_calcBlockSizeSmall:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_calcBlockSizeSmall
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_calcBlockSizeSmall
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_calcBlockSizeSmall
repeat_extend_back_end_calcBlockSizeSmall:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
repeat_dst_size_check_calcBlockSizeSmall:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_calcBlockSizeSmall
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_calcBlockSizeSmall
JB three_bytes_repeat_emit_calcBlockSizeSmall
three_bytes_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_calcBlockSizeSmall
two_bytes_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_calcBlockSizeSmall
JMP memmove_long_repeat_emit_calcBlockSizeSmall
one_byte_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_repeat_emit_calcBlockSizeSmall:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
JMP emit_literal_done_repeat_emit_calcBlockSizeSmall
memmove_long_repeat_emit_calcBlockSizeSmall:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
emit_literal_done_repeat_emit_calcBlockSizeSmall:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+8(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+8(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_calcBlockSizeSmall
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall
matchlen_bsf_16repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match8_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_calcBlockSizeSmall
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_calcBlockSizeSmall
matchlen_bsf_8_repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match4_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_calcBlockSizeSmall
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_calcBlockSizeSmall
JB repeat_extend_forward_end_calcBlockSizeSmall
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match1_repeat_extend_calcBlockSizeSmall:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_calcBlockSizeSmall
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_calcBlockSizeSmall:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall
two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
- MOVL BX, SI
- SHLL $0x02, SI
- CMPL BX, $0x0c
+ MOVL SI, DI
+ SHLL $0x02, DI
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP repeat_end_emit_calcBlockSizeSmall
emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
repeat_end_emit_calcBlockSizeSmall:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_calcBlockSizeSmall
no_repeat_found_calcBlockSizeSmall:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_calcBlockSizeSmall
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_calcBlockSizeSmall
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_calcBlockSizeSmall
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_calcBlockSizeSmall
candidate3_match_calcBlockSizeSmall:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_calcBlockSizeSmall
candidate2_match_calcBlockSizeSmall:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_calcBlockSizeSmall:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_calcBlockSizeSmall
match_extend_back_loop_calcBlockSizeSmall:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_calcBlockSizeSmall
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_calcBlockSizeSmall
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_calcBlockSizeSmall
JMP match_extend_back_loop_calcBlockSizeSmall
match_extend_back_end_calcBlockSizeSmall:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_dst_size_check_calcBlockSizeSmall:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_calcBlockSizeSmall
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), DI
+ CMPL DI, $0x3c
JB one_byte_match_emit_calcBlockSizeSmall
- CMPL SI, $0x00000100
+ CMPL DI, $0x00000100
JB two_bytes_match_emit_calcBlockSizeSmall
JB three_bytes_match_emit_calcBlockSizeSmall
three_bytes_match_emit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_calcBlockSizeSmall
two_bytes_match_emit_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL SI, $0x40
+ ADDQ $0x02, CX
+ CMPL DI, $0x40
JB memmove_match_emit_calcBlockSizeSmall
JMP memmove_long_match_emit_calcBlockSizeSmall
one_byte_match_emit_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_match_emit_calcBlockSizeSmall:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
JMP emit_literal_done_match_emit_calcBlockSizeSmall
memmove_long_match_emit_calcBlockSizeSmall:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
emit_literal_done_match_emit_calcBlockSizeSmall:
match_nolit_loop_calcBlockSizeSmall:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+8(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+8(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_calcBlockSizeSmall
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall
matchlen_bsf_16match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_calcBlockSizeSmall
matchlen_match8_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_calcBlockSizeSmall
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_calcBlockSizeSmall
matchlen_bsf_8_match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_calcBlockSizeSmall
matchlen_match4_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_calcBlockSizeSmall
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_calcBlockSizeSmall
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_calcBlockSizeSmall
JB match_nolit_end_calcBlockSizeSmall
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_calcBlockSizeSmall
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_calcBlockSizeSmall
matchlen_match1_match_nolit_calcBlockSizeSmall:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_calcBlockSizeSmall
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_calcBlockSizeSmall:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_calcBlockSizeSmall:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_calcBlockSizeSmall
two_byte_offset_short_match_nolit_calcBlockSizeSmall:
- MOVL R9, BX
- SHLL $0x02, BX
- CMPL R9, $0x0c
+ MOVL R10, SI
+ SHLL $0x02, SI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_calcBlockSizeSmall
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_calcBlockSizeSmall
emit_copy_three_match_nolit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_calcBlockSizeSmall:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_calcBlockSizeSmall
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_nolit_dst_ok_calcBlockSizeSmall:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x37, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x37, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x37, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x37, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_calcBlockSizeSmall
- INCL CX
+ INCL DX
JMP search_loop_calcBlockSizeSmall
emit_remainder_calcBlockSizeSmall:
- MOVQ src_len+8(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+8(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
emit_remainder_ok_calcBlockSizeSmall:
- MOVQ src_len+8(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+8(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
- LEAL -1(SI), CX
- CMPL CX, $0x3c
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), AX
+ CMPL AX, $0x3c
JB one_byte_emit_remainder_calcBlockSizeSmall
- CMPL CX, $0x00000100
+ CMPL AX, $0x00000100
JB two_bytes_emit_remainder_calcBlockSizeSmall
JB three_bytes_emit_remainder_calcBlockSizeSmall
three_bytes_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_calcBlockSizeSmall
two_bytes_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL CX, $0x40
+ ADDQ $0x02, CX
+ CMPL AX, $0x40
JB memmove_emit_remainder_calcBlockSizeSmall
JMP memmove_long_emit_remainder_calcBlockSizeSmall
one_byte_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_emit_remainder_calcBlockSizeSmall:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
JMP emit_literal_done_emit_remainder_calcBlockSizeSmall
memmove_long_emit_remainder_calcBlockSizeSmall:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
emit_literal_done_emit_remainder_calcBlockSizeSmall:
- MOVQ AX, ret+24(FP)
+ MOVQ CX, ret+32(FP)
RET
// func emitLiteral(dst []byte, lit []byte) int
@@ -19783,7 +19809,7 @@ TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
XORQ DI, DI
lz4_s2_loop:
@@ -20266,7 +20292,7 @@ TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
XORQ DI, DI
lz4s_s2_loop:
@@ -20751,7 +20777,7 @@ TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
lz4_snappy_loop:
CMPQ DX, BX
@@ -21017,7 +21043,7 @@ TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
lz4s_snappy_loop:
CMPQ DX, BX
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index a79c4a527c62c..8f8223cd3a678 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,6 +6,7 @@ package zstd
import (
"crypto/rand"
+ "errors"
"fmt"
"io"
"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+ if s.eofWritten {
+ return 0, ErrEncoderClosed
+ }
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+ if final {
+ s.eofWritten = true
+ }
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
- if final {
- s.eofWritten = true
- }
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return s.err
}
return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
}
err := e.nextBlock(true)
if err != nil {
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
}
_, s.err = s.w.Write(frame)
}
+ if s.err == nil {
+ s.err = ErrEncoderClosed
+ return nil
+ }
+
return s.err
}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 4be7cc73671b3..066bef2a4f0e9 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -88,6 +88,10 @@ var (
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+ // ErrEncoderClosed will be returned if the Encoder was used after
+ // Close has been called.
+ ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
index 68444aa681d42..9e4ddc4c88ad4 100644
--- a/vendor/github.com/minio/minio-go/v7/Makefile
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -32,6 +32,10 @@ functional-test:
@GO111MODULE=on go build -race functional_tests.go
@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
+functional-test-notls:
+ @GO111MODULE=on go build -race functional_tests.go
+ @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests
+
clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index d769648a7ef34..10131a5be630d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -45,6 +45,8 @@ const (
ReplicationStatusFailed ReplicationStatus = "FAILED"
// ReplicationStatusReplica indicates object is a replica of a source
ReplicationStatusReplica ReplicationStatus = "REPLICA"
+ // ReplicationStatusReplicaEdge indicates object is a replica of a edge source
+ ReplicationStatusReplicaEdge ReplicationStatus = "REPLICA-EDGE"
)
// Empty returns true if no replication status set.
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 1d6b6650250a3..90e9b63f5b14a 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -128,7 +128,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.77"
+ libraryVersion = "v7.0.78"
)
// User Agent should always following the below style.
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 780dc899795c8..c0180b36b7015 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -3565,16 +3565,10 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje
}
}
- hasFullObjectChecksum := true
- if OA.Checksum.ChecksumCRC32 == "" {
- if OA.Checksum.ChecksumCRC32C == "" {
- if OA.Checksum.ChecksumSHA1 == "" {
- if OA.Checksum.ChecksumSHA256 == "" {
- hasFullObjectChecksum = false
- }
- }
- }
- }
+ hasFullObjectChecksum := (OA.Checksum.ChecksumCRC32 != "" ||
+ OA.Checksum.ChecksumCRC32C != "" ||
+ OA.Checksum.ChecksumSHA1 != "" ||
+ OA.Checksum.ChecksumSHA256 != "")
if test.HasFullChecksum {
if !hasFullObjectChecksum {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 596d951525dd2..f1c76c78ea0a3 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -25,6 +25,7 @@ import (
"io"
"net/http"
"net/url"
+ "os"
"strconv"
"strings"
"time"
@@ -85,29 +86,59 @@ type STSWebIdentity struct {
// assuming.
RoleARN string
+ // Policy is the policy where the credentials should be limited too.
+ Policy string
+
// roleSessionName is the identifier for the assumed role session.
roleSessionName string
}
// NewSTSWebIdentity returns a pointer to a new
// Credentials object wrapping the STSWebIdentity.
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) {
if stsEndpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if getWebIDTokenExpiry == nil {
return nil, errors.New("Web ID token and expiry retrieval function should be defined")
}
- return New(&STSWebIdentity{
+ i := &STSWebIdentity{
Client: &http.Client{
Transport: http.DefaultTransport,
},
STSEndpoint: stsEndpoint,
GetWebIDTokenExpiry: getWebIDTokenExpiry,
- }), nil
+ }
+ for _, o := range opts {
+ o(i)
+ }
+ return New(i), nil
+}
+
+// NewKubernetesIdentity returns a pointer to a new
+// Credentials object using the Kubernetes service account
+func NewKubernetesIdentity(stsEndpoint string, opts ...func(*STSWebIdentity)) (*Credentials, error) {
+ return NewSTSWebIdentity(stsEndpoint, func() (*WebIdentityToken, error) {
+ token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+ if err != nil {
+ return nil, err
+ }
+
+ return &WebIdentityToken{
+ Token: string(token),
+ }, nil
+ }, opts...)
+}
+
+// WithPolicy option will enforce that the returned credentials
+// will be scoped down to the specified policy
+func WithPolicy(policy string) func(*STSWebIdentity) {
+ return func(i *STSWebIdentity) {
+ i.Policy = policy
+ }
}
-func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
+func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string,
getWebIDTokenExpiry func() (*WebIdentityToken, error),
) (AssumeRoleWithWebIdentityResponse, error) {
idToken, err := getWebIDTokenExpiry()
@@ -133,6 +164,9 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
if idToken.Expiry > 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
}
+ if policy != "" {
+ v.Set("Policy", policy)
+ }
v.Set("Version", STSVersion)
u, err := url.Parse(endpoint)
@@ -183,7 +217,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSWebIdentity) Retrieve() (Value, error) {
- a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+ a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
if err != nil {
return Value{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
index 7a84a6f349edb..33465c6326de7 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -69,7 +69,7 @@ const (
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
// borrowed from this article and also testing various ASCII characters following regex
// is supported by AWS S3 for both tags and values.
-var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ =]+$`)
func checkKey(key string) error {
if len(key) == 0 {
diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore
new file mode 100644
index 0000000000000..d8b32652e5a92
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.swp
diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE
new file mode 100644
index 0000000000000..99d2e9dc8ff27
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2012 Dave Cheney. All rights reserved.
+Copyright (c) 2014 Kuba Podgórski. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md
new file mode 100644
index 0000000000000..0662c0208c572
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/README.md
@@ -0,0 +1,45 @@
+[](http://godoc.org/github.com/pkg/xattr)
+[](https://goreportcard.com/report/github.com/pkg/xattr)
+[](https://github.com/pkg/xattr/actions?query=workflow%3Abuild)
+[](https://codecov.io/gh/pkg/xattr)
+
+xattr
+=====
+Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris).
+
+"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes)
+
+`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored.
+
+The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they
+do not reference a symlink that appears at the end of a path. See
+[GoDoc](http://godoc.org/github.com/pkg/xattr) for details.
+
+### Example
+```go
+ const path = "/tmp/myfile"
+ const prefix = "user."
+
+ if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil {
+ log.Fatal(err)
+ }
+
+ var list []string
+ if list, err = xattr.List(path); err != nil {
+ log.Fatal(err)
+ }
+
+ var data []byte
+ if data, err = xattr.Get(path, prefix+"test"); err != nil {
+ log.Fatal(err)
+ }
+
+ if err = xattr.Remove(path, prefix+"test"); err != nil {
+ log.Fatal(err)
+ }
+
+ // One can also specify the flags parameter to be passed to the OS.
+ if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil {
+ log.Fatal(err)
+ }
+```
diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go
new file mode 100644
index 0000000000000..e34e274d51373
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr.go
@@ -0,0 +1,258 @@
+/*
+Package xattr provides support for extended attributes on linux, darwin and freebsd.
+Extended attributes are name:value pairs associated permanently with files and directories,
+similar to the environment strings associated with a process.
+An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty.
+More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes .
+
+All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. The "L"
+variant will not follow a symlink at the end of the path, and "F" variant accepts
+a file descriptor instead of a path.
+
+Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are
+symlinks:
+Get will follow "symlink1" and "symlink2" and operate on the target of
+"symlink2". LGet will follow "symlink1" but operate directly on "symlink2".
+*/
+package xattr
+
+import (
+ "os"
+ "syscall"
+)
+
+// Error records an error and the operation, file path and attribute that caused it.
+type Error struct {
+ Op string
+ Path string
+ Name string
+ Err error
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+func (e *Error) Error() (errstr string) {
+ if e.Op != "" {
+ errstr += e.Op
+ }
+ if e.Path != "" {
+ if errstr != "" {
+ errstr += " "
+ }
+ errstr += e.Path
+ }
+ if e.Name != "" {
+ if errstr != "" {
+ errstr += " "
+ }
+ errstr += e.Name
+ }
+ if e.Err != nil {
+ if errstr != "" {
+ errstr += ": "
+ }
+ errstr += e.Err.Error()
+ }
+ return
+}
+
+// Get retrieves extended attribute data associated with path. It will follow
+// all symlinks along the path.
+func Get(path, name string) ([]byte, error) {
+ return get(path, name, func(name string, data []byte) (int, error) {
+ return getxattr(path, name, data)
+ })
+}
+
+// LGet is like Get but does not follow a symlink at the end of the path.
+func LGet(path, name string) ([]byte, error) {
+ return get(path, name, func(name string, data []byte) (int, error) {
+ return lgetxattr(path, name, data)
+ })
+}
+
+// FGet is like Get but accepts a os.File instead of a file path.
+func FGet(f *os.File, name string) ([]byte, error) {
+ return get(f.Name(), name, func(name string, data []byte) (int, error) {
+ return fgetxattr(f, name, data)
+ })
+}
+
+type getxattrFunc func(name string, data []byte) (int, error)
+
+// get contains the buffer allocation logic used by both Get and LGet.
+func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
+ const (
+ // Start with a 1 KB buffer for the xattr value
+ initialBufSize = 1024
+
+ // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's
+ // much smaller: documented at 64 KB. However, at least on TrueNAS SCALE, a
+ // Debian-based Linux distro, it can be larger.
+ maxBufSize = 64 * 1024 * 1024
+
+ // Function name as reported in error messages
+ myname = "xattr.get"
+ )
+
+ size := initialBufSize
+ for {
+ data := make([]byte, size)
+ read, err := getxattrFunc(name, data)
+
+ // If the buffer was too small to fit the value, Linux and MacOS react
+ // differently:
+ // Linux: returns an ERANGE error and "-1" bytes. However, the TrueNAS
+ // SCALE distro sometimes returns E2BIG.
+ // MacOS: truncates the value and returns "size" bytes. If the value
+ // happens to be exactly as big as the buffer, we cannot know if it was
+ // truncated, and we retry with a bigger buffer. Contrary to documentation,
+ // MacOS never seems to return ERANGE!
+ // To keep the code simple, we always check both conditions, and sometimes
+ // double the buffer size without it being strictly necessary.
+ if err == syscall.ERANGE || err == syscall.E2BIG || read == size {
+ // The buffer was too small. Try again.
+ size <<= 1
+ if size >= maxBufSize {
+ return nil, &Error{myname, path, name, syscall.EOVERFLOW}
+ }
+ continue
+ }
+ if err != nil {
+ return nil, &Error{myname, path, name, err}
+ }
+ return data[:read], nil
+ }
+}
+
+// Set associates name and data together as an attribute of path.
+func Set(path, name string, data []byte) error {
+ if err := setxattr(path, name, data, 0); err != nil {
+ return &Error{"xattr.Set", path, name, err}
+ }
+ return nil
+}
+
+// LSet is like Set but does not follow a symlink at
+// the end of the path.
+func LSet(path, name string, data []byte) error {
+ if err := lsetxattr(path, name, data, 0); err != nil {
+ return &Error{"xattr.LSet", path, name, err}
+ }
+ return nil
+}
+
+// FSet is like Set but accepts a os.File instead of a file path.
+func FSet(f *os.File, name string, data []byte) error {
+ if err := fsetxattr(f, name, data, 0); err != nil {
+ return &Error{"xattr.FSet", f.Name(), name, err}
+ }
+ return nil
+}
+
+// SetWithFlags associates name and data together as an attribute of path.
+// Forwards the flags parameter to the syscall layer.
+func SetWithFlags(path, name string, data []byte, flags int) error {
+ if err := setxattr(path, name, data, flags); err != nil {
+ return &Error{"xattr.SetWithFlags", path, name, err}
+ }
+ return nil
+}
+
+// LSetWithFlags is like SetWithFlags but does not follow a symlink at
+// the end of the path.
+func LSetWithFlags(path, name string, data []byte, flags int) error {
+ if err := lsetxattr(path, name, data, flags); err != nil {
+ return &Error{"xattr.LSetWithFlags", path, name, err}
+ }
+ return nil
+}
+
+// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path.
+func FSetWithFlags(f *os.File, name string, data []byte, flags int) error {
+ if err := fsetxattr(f, name, data, flags); err != nil {
+ return &Error{"xattr.FSetWithFlags", f.Name(), name, err}
+ }
+ return nil
+}
+
+// Remove removes the attribute associated with the given path.
+func Remove(path, name string) error {
+ if err := removexattr(path, name); err != nil {
+ return &Error{"xattr.Remove", path, name, err}
+ }
+ return nil
+}
+
+// LRemove is like Remove but does not follow a symlink at the end of the
+// path.
+func LRemove(path, name string) error {
+ if err := lremovexattr(path, name); err != nil {
+ return &Error{"xattr.LRemove", path, name, err}
+ }
+ return nil
+}
+
+// FRemove is like Remove but accepts a os.File instead of a file path.
+func FRemove(f *os.File, name string) error {
+ if err := fremovexattr(f, name); err != nil {
+ return &Error{"xattr.FRemove", f.Name(), name, err}
+ }
+ return nil
+}
+
+// List retrieves a list of names of extended attributes associated
+// with the given path in the file system.
+func List(path string) ([]string, error) {
+ return list(path, func(data []byte) (int, error) {
+ return listxattr(path, data)
+ })
+}
+
+// LList is like List but does not follow a symlink at the end of the
+// path.
+func LList(path string) ([]string, error) {
+ return list(path, func(data []byte) (int, error) {
+ return llistxattr(path, data)
+ })
+}
+
+// FList is like List but accepts a os.File instead of a file path.
+func FList(f *os.File) ([]string, error) {
+ return list(f.Name(), func(data []byte) (int, error) {
+ return flistxattr(f, data)
+ })
+}
+
+type listxattrFunc func(data []byte) (int, error)
+
+// list contains the buffer allocation logic used by both List and LList.
+func list(path string, listxattrFunc listxattrFunc) ([]string, error) {
+ myname := "xattr.list"
+ // find size.
+ size, err := listxattrFunc(nil)
+ if err != nil {
+ return nil, &Error{myname, path, "", err}
+ }
+ if size > 0 {
+ // `size + 1` because of ERANGE error when reading
+ // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16).
+ buf := make([]byte, size+1)
+ // Read into buffer of that size.
+ read, err := listxattrFunc(buf)
+ if err != nil {
+ return nil, &Error{myname, path, "", err}
+ }
+ return stringsFromByteSlice(buf[:read]), nil
+ }
+ return []string{}, nil
+}
+
+// bytePtrFromSlice returns a pointer to array of bytes and a size.
+func bytePtrFromSlice(data []byte) (ptr *byte, size int) {
+ size = len(data)
+ if size > 0 {
+ ptr = &data[0]
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go
new file mode 100644
index 0000000000000..f4a3f95390490
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_bsd.go
@@ -0,0 +1,201 @@
+//go:build freebsd || netbsd
+// +build freebsd netbsd
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ EXTATTR_NAMESPACE_USER = 1
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENOATTR
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data)
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return getxattr(f.Name(), name, data)
+}
+
+// sysGet is called by getxattr and lgetxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_get_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ void *data,
+ size_t nbytes);
+
+ ssize_t extattr_get_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ void *data,
+ size_t nbytes);
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
+ if err != syscall.Errno(0) {
+ return int(r0), err
+ }
+ return int(r0), nil
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data)
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data)
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return setxattr(f.Name(), name, data, flags)
+}
+
+// sysSet is called by setxattr and lsetxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_set_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ const void *data,
+ size_t nbytes
+ );
+
+ ssize_t extattr_set_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ const void *data,
+ size_t nbytes
+ );
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
+ if err != syscall.Errno(0) {
+ return err
+ }
+ if int(r0) != nbytes {
+ return syscall.E2BIG
+ }
+ return nil
+}
+
+func removexattr(path string, name string) error {
+ return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name)
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return removexattr(f.Name(), name)
+}
+
+// sysSet is called by removexattr and lremovexattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysRemove(syscallNum uintptr, path string, name string) error {
+ /*
+ int extattr_delete_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname
+ );
+
+ int extattr_delete_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname
+ );
+ */
+ _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ )
+ if err != syscall.Errno(0) {
+ return err
+ }
+ return nil
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data)
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return listxattr(f.Name(), data)
+}
+
+// sysSet is called by listxattr and llistxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysList(syscallNum uintptr, path string, data []byte) (int, error) {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_list_file(
+ const char *path,
+ int attrnamespace,
+ void *data,
+ size_t nbytes
+ );
+
+ ssize_t extattr_list_link(
+ const char *path,
+ int attrnamespace,
+ void *data,
+ size_t nbytes
+ );
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0)
+ if err != syscall.Errno(0) {
+ return int(r0), err
+ }
+ return int(r0), nil
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On FreeBSD, each entry consists of a single byte containing the length
+// of the attribute name, followed by the attribute name.
+// The name is _not_ terminated by NULL.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ index := 0
+ for index < len(buf) {
+ next := index + 1 + int(buf[index])
+ result = append(result, string(buf[index+1:next]))
+ index = next
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go
new file mode 100644
index 0000000000000..ee7a501dae5cb
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_darwin.go
@@ -0,0 +1,90 @@
+//go:build darwin
+// +build darwin
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_NOFOLLOW = 0x0001
+ XATTR_CREATE = 0x0002
+ XATTR_REPLACE = 0x0004
+ XATTR_NOSECURITY = 0x0008
+ XATTR_NODEFAULT = 0x0010
+ XATTR_SHOWCOMPRESSION = 0x0020
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENOATTR
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return unix.Getxattr(path, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return unix.Lgetxattr(path, name, data)
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return getxattr(f.Name(), name, data)
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return unix.Setxattr(path, name, data, flags)
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, name, data, flags)
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return setxattr(f.Name(), name, data, flags)
+}
+
+func removexattr(path string, name string) error {
+ return unix.Removexattr(path, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return unix.Lremovexattr(path, name)
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return removexattr(f.Name(), name)
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return unix.Listxattr(path, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return unix.Llistxattr(path, data)
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return listxattr(f.Name(), data)
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Darwin and Linux, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go
new file mode 100644
index 0000000000000..879085ee5d453
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_linux.go
@@ -0,0 +1,142 @@
+//go:build linux
+// +build linux
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_CREATE = unix.XATTR_CREATE
+ XATTR_REPLACE = unix.XATTR_REPLACE
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENODATA
+)
+
+// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system
+// calls. This function works around this by retrying system calls until they
+// stop returning EINTR.
+//
+// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce.
+func ignoringEINTR(fn func() error) (err error) {
+ for {
+ err = fn()
+ if err != unix.EINTR {
+ break
+ }
+ }
+ return err
+}
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Getxattr(path, name, data)
+ return err
+ })
+ return r, err
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Lgetxattr(path, name, data)
+ return err
+ })
+ return r, err
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Fgetxattr(int(f.Fd()), name, data)
+ return err
+ })
+ return r, err
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Setxattr(path, name, data, flags)
+ })
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Lsetxattr(path, name, data, flags)
+ })
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Fsetxattr(int(f.Fd()), name, data, flags)
+ })
+}
+
+func removexattr(path string, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Removexattr(path, name)
+ })
+}
+
+func lremovexattr(path string, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Lremovexattr(path, name)
+ })
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Fremovexattr(int(f.Fd()), name)
+ })
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Listxattr(path, data)
+ return err
+ })
+ return r, err
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Llistxattr(path, data)
+ return err
+ })
+ return r, err
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Flistxattr(int(f.Fd()), data)
+ return err
+ })
+ return r, err
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Darwin and Linux, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go
new file mode 100644
index 0000000000000..7c98b4afbac25
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_solaris.go
@@ -0,0 +1,175 @@
+//go:build solaris
+// +build solaris
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_CREATE = 0x1
+ XATTR_REPLACE = 0x2
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENODATA
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ f, err := openNonblock(path)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = f.Close()
+ }()
+ return fgetxattr(f, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return 0, unix.ENOTSUP
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = unix.Close(fd)
+ }()
+ return unix.Read(fd, data)
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ f, err := openNonblock(path)
+ if err != nil {
+ return err
+ }
+ err = fsetxattr(f, name, data, flags)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return unix.ENOTSUP
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ mode := unix.O_WRONLY | unix.O_XATTR
+ if flags&XATTR_REPLACE != 0 {
+ mode |= unix.O_TRUNC
+ } else if flags&XATTR_CREATE != 0 {
+ mode |= unix.O_CREAT | unix.O_EXCL
+ } else {
+ mode |= unix.O_CREAT | unix.O_TRUNC
+ }
+ fd, err := unix.Openat(int(f.Fd()), name, mode, 0666)
+ if err != nil {
+ return err
+ }
+ if _, err = unix.Write(fd, data); err != nil {
+ _ = unix.Close(fd)
+ return err
+ }
+ return unix.Close(fd)
+}
+
+func removexattr(path string, name string) error {
+ mode := unix.O_RDONLY | unix.O_XATTR | unix.O_NONBLOCK | unix.O_CLOEXEC
+ fd, err := unix.Open(path, mode, 0)
+ if err != nil {
+ return err
+ }
+ f := os.NewFile(uintptr(fd), path)
+ defer func() {
+ _ = f.Close()
+ }()
+ return fremovexattr(f, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return unix.ENOTSUP
+}
+
+func fremovexattr(f *os.File, name string) error {
+ fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = unix.Close(fd)
+ }()
+ return unix.Unlinkat(fd, name, 0)
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ f, err := openNonblock(path)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = f.Close()
+ }()
+ return flistxattr(f, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return 0, unix.ENOTSUP
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0)
+ if err != nil {
+ return 0, unix.ENOTSUP
+ }
+ xf := os.NewFile(uintptr(fd), f.Name())
+ defer func() {
+ _ = xf.Close()
+ }()
+ names, err := xf.Readdirnames(-1)
+ if err != nil {
+ return 0, err
+ }
+ var buf []byte
+ for _, name := range names {
+ buf = append(buf, append([]byte(name), '\000')...)
+ }
+ if data == nil {
+ return len(buf), nil
+ }
+ return copy(data, buf), nil
+}
+
+// Like os.Open, but passes O_NONBLOCK to the open(2) syscall.
+func openNonblock(path string) (*os.File, error) {
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC|unix.O_NONBLOCK, 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), err
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// We simulate Linux/Darwin, where each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go
new file mode 100644
index 0000000000000..8886fbdc4216e
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go
@@ -0,0 +1,70 @@
+//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
+// +build !linux,!freebsd,!netbsd,!darwin,!solaris
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+)
+
+const (
+ // We need to use the default for non supported operating systems
+ ENOATTR = syscall.Errno(0x59)
+)
+
+// XATTR_SUPPORTED will be true if the current platform is supported
+const XATTR_SUPPORTED = false
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return nil
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return nil
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return nil
+}
+
+func removexattr(path string, name string) error {
+ return nil
+}
+
+func lremovexattr(path string, name string) error {
+ return nil
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return nil
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return 0, nil
+}
+
+// dummy
+func stringsFromByteSlice(buf []byte) (result []string) {
+ return []string{}
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go
index 4acad1fd1e8a4..47e471c402f7e 100644
--- a/vendor/github.com/shirou/gopsutil/v4/common/env.go
+++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go
@@ -12,13 +12,14 @@ type EnvKeyType string
var EnvKey = EnvKeyType("env")
const (
- HostProcEnvKey EnvKeyType = "HOST_PROC"
- HostSysEnvKey EnvKeyType = "HOST_SYS"
- HostEtcEnvKey EnvKeyType = "HOST_ETC"
- HostVarEnvKey EnvKeyType = "HOST_VAR"
- HostRunEnvKey EnvKeyType = "HOST_RUN"
- HostDevEnvKey EnvKeyType = "HOST_DEV"
- HostRootEnvKey EnvKeyType = "HOST_ROOT"
+ HostProcEnvKey EnvKeyType = "HOST_PROC"
+ HostSysEnvKey EnvKeyType = "HOST_SYS"
+ HostEtcEnvKey EnvKeyType = "HOST_ETC"
+ HostVarEnvKey EnvKeyType = "HOST_VAR"
+ HostRunEnvKey EnvKeyType = "HOST_RUN"
+ HostDevEnvKey EnvKeyType = "HOST_DEV"
+ HostRootEnvKey EnvKeyType = "HOST_ROOT"
+ HostProcMountinfo EnvKeyType = "HOST_PROC_MOUNTINFO"
)
type EnvMap map[EnvKeyType]string
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go
index 79a458b8e21cc..b3e3a668de133 100644
--- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go
@@ -5,12 +5,15 @@ package cpu
import (
"context"
+ "fmt"
"strconv"
"strings"
+ "unsafe"
- "github.com/shoenig/go-m1cpu"
"github.com/tklauser/go-sysconf"
"golang.org/x/sys/unix"
+
+ "github.com/shirou/gopsutil/v4/internal/common"
)
// sys/resource.h
@@ -23,6 +26,24 @@ const (
cpUStates = 5
)
+// mach/machine.h
+const (
+ cpuStateUser = 0
+ cpuStateSystem = 1
+ cpuStateIdle = 2
+ cpuStateNice = 3
+ cpuStateMax = 4
+)
+
+// mach/processor_info.h
+const (
+ processorCpuLoadInfo = 2
+)
+
+type hostCpuLoadInfoData struct {
+ cpuTicks [cpuStateMax]uint32
+}
+
// default value. from time.h
var ClocksPerSec = float64(128)
@@ -39,11 +60,17 @@ func Times(percpu bool) ([]TimesStat, error) {
}
func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) {
+ lib, err := common.NewLibrary(common.System)
+ if err != nil {
+ return nil, err
+ }
+ defer lib.Close()
+
if percpu {
- return perCPUTimes()
+ return perCPUTimes(lib)
}
- return allCPUTimes()
+ return allCPUTimes(lib)
}
// Returns only one CPUInfoStat on FreeBSD
@@ -86,15 +113,9 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) {
c.CacheSize = int32(cacheSize)
c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor")
- if m1cpu.IsAppleSilicon() {
- c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000)
- } else {
- // Use the rated frequency of the CPU. This is a static value and does not
- // account for low power or Turbo Boost modes.
- cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency")
- if err == nil {
- c.Mhz = float64(cpuFrequency) / 1000000.0
- }
+ v, err := getFrequency()
+ if err == nil {
+ c.Mhz = v
}
return append(ret, c), nil
@@ -115,3 +136,63 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) {
return int(count), nil
}
+
+func perCPUTimes(machLib *common.Library) ([]TimesStat, error) {
+ machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym)
+ machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym)
+ hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym)
+ vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym)
+
+ var count, ncpu uint32
+ var cpuload *hostCpuLoadInfoData
+
+ status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count)
+
+ if status != common.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_processor_info error=%d", status)
+ }
+
+ defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu))
+
+ ret := []TimesStat{}
+ loads := unsafe.Slice(cpuload, ncpu)
+
+ for i := 0; i < int(ncpu); i++ {
+ c := TimesStat{
+ CPU: fmt.Sprintf("cpu%d", i),
+ User: float64(loads[i].cpuTicks[cpuStateUser]) / ClocksPerSec,
+ System: float64(loads[i].cpuTicks[cpuStateSystem]) / ClocksPerSec,
+ Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec,
+ Idle: float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec,
+ }
+
+ ret = append(ret, c)
+ }
+
+ return ret, nil
+}
+
+func allCPUTimes(machLib *common.Library) ([]TimesStat, error) {
+ machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym)
+ hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym)
+
+ var cpuload hostCpuLoadInfoData
+ count := uint32(cpuStateMax)
+
+ status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO,
+ uintptr(unsafe.Pointer(&cpuload)), &count)
+
+ if status != common.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_statistics error=%d", status)
+ }
+
+ c := TimesStat{
+ CPU: "cpu-total",
+ User: float64(cpuload.cpuTicks[cpuStateUser]) / ClocksPerSec,
+ System: float64(cpuload.cpuTicks[cpuStateSystem]) / ClocksPerSec,
+ Nice: float64(cpuload.cpuTicks[cpuStateNice]) / ClocksPerSec,
+ Idle: float64(cpuload.cpuTicks[cpuStateIdle]) / ClocksPerSec,
+ }
+
+ return []TimesStat{c}, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go
new file mode 100644
index 0000000000000..5031842439092
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: BSD-3-Clause
+//go:build darwin && arm64
+
+package cpu
+
+import (
+ "encoding/binary"
+ "fmt"
+ "unsafe"
+
+ "github.com/shirou/gopsutil/v4/internal/common"
+)
+
+// https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go
+func getFrequency() (float64, error) {
+ ioKit, err := common.NewLibrary(common.IOKit)
+ if err != nil {
+ return 0, err
+ }
+ defer ioKit.Close()
+
+ coreFoundation, err := common.NewLibrary(common.CoreFoundation)
+ if err != nil {
+ return 0, err
+ }
+ defer coreFoundation.Close()
+
+ ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym)
+ ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym)
+ ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym)
+ ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, common.IORegistryEntryGetNameSym)
+ ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym)
+ ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym)
+
+ cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym)
+ cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym)
+ cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym)
+ cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym)
+
+ matching := ioServiceMatching("AppleARMIODevice")
+
+ var iterator uint32
+ if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS {
+ return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status)
+ }
+ defer ioObjectRelease(iterator)
+
+ pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8)
+ defer cfRelease(uintptr(pCorekey))
+
+ var pCoreHz uint32
+ for {
+ service := ioIteratorNext(iterator)
+ if !(service > 0) {
+ break
+ }
+
+ buf := make([]byte, 512)
+ ioRegistryEntryGetName(service, &buf[0])
+
+ if common.GoString(&buf[0]) == "pmgr" {
+ pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions)
+ length := cfDataGetLength(uintptr(pCoreRef))
+ data := cfDataGetBytePtr(uintptr(pCoreRef))
+
+ // composite uint32 from the byte array
+ buf := unsafe.Slice((*byte)(data), length)
+
+ // combine the bytes into a uint32 value
+ b := buf[length-8 : length-4]
+ pCoreHz = binary.LittleEndian.Uint32(b)
+ ioObjectRelease(service)
+ break
+ }
+
+ ioObjectRelease(service)
+ }
+
+ return float64(pCoreHz / 1_000_000), nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go
deleted file mode 100644
index 3a02024c5be46..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && cgo
-
-package cpu
-
-/*
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#if TARGET_OS_MAC
-#include
-#endif
-#include
-#include
-*/
-import "C"
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "unsafe"
-)
-
-// these CPU times for darwin is borrowed from influxdb/telegraf.
-
-func perCPUTimes() ([]TimesStat, error) {
- var (
- count C.mach_msg_type_number_t
- cpuload *C.processor_cpu_load_info_data_t
- ncpu C.natural_t
- )
-
- status := C.host_processor_info(C.host_t(C.mach_host_self()),
- C.PROCESSOR_CPU_LOAD_INFO,
- &ncpu,
- (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
- &count)
-
- if status != C.KERN_SUCCESS {
- return nil, fmt.Errorf("host_processor_info error=%d", status)
- }
-
- // jump through some cgo casting hoops and ensure we properly free
- // the memory that cpuload points to
- target := C.vm_map_t(C.mach_task_self_)
- address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
- defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))
-
- // the body of struct processor_cpu_load_info
- // aka processor_cpu_load_info_data_t
- var cpu_ticks [C.CPU_STATE_MAX]uint32
-
- // copy the cpuload array to a []byte buffer
- // where we can binary.Read the data
- size := int(ncpu) * binary.Size(cpu_ticks)
- buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]
-
- bbuf := bytes.NewBuffer(buf)
-
- var ret []TimesStat
-
- for i := 0; i < int(ncpu); i++ {
- err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
- if err != nil {
- return nil, err
- }
-
- c := TimesStat{
- CPU: fmt.Sprintf("cpu%d", i),
- User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
- System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
- Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
- Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
- }
-
- ret = append(ret, c)
- }
-
- return ret, nil
-}
-
-func allCPUTimes() ([]TimesStat, error) {
- var count C.mach_msg_type_number_t
- var cpuload C.host_cpu_load_info_data_t
-
- count = C.HOST_CPU_LOAD_INFO_COUNT
-
- status := C.host_statistics(C.host_t(C.mach_host_self()),
- C.HOST_CPU_LOAD_INFO,
- C.host_info_t(unsafe.Pointer(&cpuload)),
- &count)
-
- if status != C.KERN_SUCCESS {
- return nil, fmt.Errorf("host_statistics error=%d", status)
- }
-
- c := TimesStat{
- CPU: "cpu-total",
- User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
- System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
- Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
- Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
- }
-
- return []TimesStat{c}, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go
new file mode 100644
index 0000000000000..b9e52aba1762c
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause
+//go:build darwin && !arm64
+
+package cpu
+
+import "golang.org/x/sys/unix"
+
+func getFrequency() (float64, error) {
+ // Use the rated frequency of the CPU. This is a static value and does not
+ // account for low power or Turbo Boost modes.
+ cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency")
+ return float64(cpuFrequency) / 1000000.0, err
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go
deleted file mode 100644
index 1af8566a67bef..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && !cgo
-
-package cpu
-
-import "github.com/shirou/gopsutil/v4/internal/common"
-
-func perCPUTimes() ([]TimesStat, error) {
- return []TimesStat{}, common.ErrNotImplementedError
-}
-
-func allCPUTimes() ([]TimesStat, error) {
- return []TimesStat{}, common.ErrNotImplementedError
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go
index 53f9ae8d9a5a4..b473f88666eba 100644
--- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go
@@ -5,11 +5,13 @@ package common
import (
"context"
+ "fmt"
"os"
"os/exec"
"strings"
"unsafe"
+ "github.com/ebitengine/purego"
"golang.org/x/sys/unix"
)
@@ -64,3 +66,299 @@ func CallSyscall(mib []int32) ([]byte, uint64, error) {
return buf, length, nil
}
+
+// Library represents a dynamic library loaded by purego.
+type Library struct {
+ addr uintptr
+ path string
+ close func()
+}
+
+// library paths
+const (
+ IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit"
+ CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation"
+ System = "/usr/lib/libSystem.B.dylib"
+)
+
+func NewLibrary(path string) (*Library, error) {
+ lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL)
+ if err != nil {
+ return nil, err
+ }
+
+ closeFunc := func() {
+ purego.Dlclose(lib)
+ }
+
+ return &Library{
+ addr: lib,
+ path: path,
+ close: closeFunc,
+ }, nil
+}
+
+func (lib *Library) Dlsym(symbol string) (uintptr, error) {
+ return purego.Dlsym(lib.addr, symbol)
+}
+
+func GetFunc[T any](lib *Library, symbol string) T {
+ var fptr T
+ purego.RegisterLibFunc(&fptr, lib.addr, symbol)
+ return fptr
+}
+
+func (lib *Library) Close() {
+ lib.close()
+}
+
+// status codes
+const (
+ KERN_SUCCESS = 0
+)
+
+// IOKit functions and symbols.
+type (
+ IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32
+ IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int
+ IOServiceMatchingFunc func(name string) unsafe.Pointer
+ IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int
+ IOServiceCloseFunc func(connect uint32) int
+ IOIteratorNextFunc func(iterator uint32) uint32
+ IORegistryEntryGetNameFunc func(entry uint32, name *byte) int
+ IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int
+ IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer
+ IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int
+ IOObjectConformsToFunc func(object uint32, className string) bool
+ IOObjectReleaseFunc func(object uint32) int
+ IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int
+
+ IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer
+ IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int
+ IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64,
+ options int32, timeout int64) unsafe.Pointer
+ IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer
+ IOHIDEventGetFloatValueFunc func(event uintptr, field int32) float64
+ IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer
+)
+
+const (
+ IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService"
+ IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices"
+ IOServiceMatchingSym = "IOServiceMatching"
+ IOServiceOpenSym = "IOServiceOpen"
+ IOServiceCloseSym = "IOServiceClose"
+ IOIteratorNextSym = "IOIteratorNext"
+ IORegistryEntryGetNameSym = "IORegistryEntryGetName"
+ IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry"
+ IORegistryEntryCreateCFPropertySym = "IORegistryEntryCreateCFProperty"
+ IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties"
+ IOObjectConformsToSym = "IOObjectConformsTo"
+ IOObjectReleaseSym = "IOObjectRelease"
+ IOConnectCallStructMethodSym = "IOConnectCallStructMethod"
+
+ IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate"
+ IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching"
+ IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent"
+ IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty"
+ IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue"
+ IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices"
+)
+
+const (
+ KIOMainPortDefault = 0
+
+ KIOHIDEventTypeTemperature = 15
+
+ KNilOptions = 0
+)
+
+const (
+ KIOMediaWholeKey = "Media"
+ KIOServicePlane = "IOService"
+)
+
+// CoreFoundation functions and symbols.
+type (
+ CFGetTypeIDFunc func(cf uintptr) int32
+ CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer
+ CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool
+ CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32,
+ keyCallBacks, valueCallBacks uintptr) unsafe.Pointer
+ CFDictionaryAddValueFunc func(theDict, key, value uintptr)
+ CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer
+ CFArrayGetCountFunc func(theArray uintptr) int32
+ CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer
+ CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer
+ CFStringGetLengthFunc func(theString uintptr) int32
+ CFStringGetCStringFunc func(theString uintptr, buffer *byte, bufferSize int32, encoding uint32)
+ CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer
+ CFDataGetLengthFunc func(theData uintptr) int32
+ CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer
+ CFReleaseFunc func(cf uintptr)
+)
+
+const (
+ CFGetTypeIDSym = "CFGetTypeID"
+ CFNumberCreateSym = "CFNumberCreate"
+ CFNumberGetValueSym = "CFNumberGetValue"
+ CFDictionaryCreateSym = "CFDictionaryCreate"
+ CFDictionaryAddValueSym = "CFDictionaryAddValue"
+ CFDictionaryGetValueSym = "CFDictionaryGetValue"
+ CFArrayGetCountSym = "CFArrayGetCount"
+ CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex"
+ CFStringCreateMutableSym = "CFStringCreateMutable"
+ CFStringGetLengthSym = "CFStringGetLength"
+ CFStringGetCStringSym = "CFStringGetCString"
+ CFStringCreateWithCStringSym = "CFStringCreateWithCString"
+ CFDataGetLengthSym = "CFDataGetLength"
+ CFDataGetBytePtrSym = "CFDataGetBytePtr"
+ CFReleaseSym = "CFRelease"
+)
+
+const (
+ KCFStringEncodingUTF8 = 0x08000100
+ KCFNumberSInt64Type = 4
+ KCFNumberIntType = 9
+ KCFAllocatorDefault = 0
+)
+
+// Kernel functions and symbols.
+type MachTimeBaseInfo struct {
+ Numer uint32
+ Denom uint32
+}
+
+type (
+ HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr,
+ outProcessorInfoCnt *uint32) int
+ HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int
+ MachHostSelfFunc func() uint32
+ MachTaskSelfFunc func() uint32
+ MachTimeBaseInfoFunc func(info uintptr) int
+ VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int
+)
+
+const (
+ HostProcessorInfoSym = "host_processor_info"
+ HostStatisticsSym = "host_statistics"
+ MachHostSelfSym = "mach_host_self"
+ MachTaskSelfSym = "mach_task_self"
+ MachTimeBaseInfoSym = "mach_timebase_info"
+ VMDeallocateSym = "vm_deallocate"
+)
+
+const (
+ CTL_KERN = 1
+ KERN_ARGMAX = 8
+ KERN_PROCARGS2 = 49
+
+ HOST_VM_INFO = 2
+ HOST_CPU_LOAD_INFO = 3
+
+ HOST_VM_INFO_COUNT = 0xf
+)
+
+// System functions and symbols.
+type (
+ ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32
+ ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32
+)
+
+const (
+ SysctlSym = "sysctl"
+ ProcPidPathSym = "proc_pidpath"
+ ProcPidInfoSym = "proc_pidinfo"
+)
+
+const (
+ MAXPATHLEN = 1024
+ PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN
+ PROC_PIDTASKINFO = 4
+ PROC_PIDVNODEPATHINFO = 9
+)
+
+// SMC represents a SMC instance.
+type SMC struct {
+ lib *Library
+ conn uint32
+ callStruct IOConnectCallStructMethodFunc
+}
+
+const ioServiceSMC = "AppleSMC"
+
+const (
+ KSMCUserClientOpen = 0
+ KSMCUserClientClose = 1
+ KSMCHandleYPCEvent = 2
+ KSMCReadKey = 5
+ KSMCWriteKey = 6
+ KSMCGetKeyCount = 7
+ KSMCGetKeyFromIndex = 8
+ KSMCGetKeyInfo = 9
+)
+
+const (
+ KSMCSuccess = 0
+ KSMCError = 1
+ KSMCKeyNotFound = 132
+)
+
+func NewSMC(ioKit *Library) (*SMC, error) {
+ if ioKit.path != IOKit {
+ return nil, fmt.Errorf("library is not IOKit")
+ }
+
+ ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym)
+ ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym)
+ ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym)
+ ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym)
+ machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym)
+
+ ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym)
+
+ service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC)))
+ if service == 0 {
+ return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC)
+ }
+
+ var conn uint32
+ if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 {
+ return nil, fmt.Errorf("ERROR: IOServiceOpen failed")
+ }
+
+ ioObjectRelease(service)
+ return &SMC{
+ lib: ioKit,
+ conn: conn,
+ callStruct: ioConnectCallStructMethod,
+ }, nil
+}
+
+func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int {
+ return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt)
+}
+
+func (s *SMC) Close() error {
+ ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym)
+
+ if result := ioServiceClose(s.conn); result != 0 {
+ return fmt.Errorf("ERROR: IOServiceClose failed")
+ }
+ return nil
+}
+
+// https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go#L26
+func GoString(cStr *byte) string {
+ if cStr == nil {
+ return ""
+ }
+ var length int
+ for {
+ if *(*byte)(unsafe.Add(unsafe.Pointer(cStr), uintptr(length))) == '\x00' {
+ break
+ }
+ length++
+ }
+ return string(unsafe.Slice(cStr, length))
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go
index 2715b890bef70..c9f91b1698ae1 100644
--- a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go
+++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go
@@ -40,23 +40,3 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ..
}
return ret, nil
}
-
-func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) {
- out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid)))
- if err != nil {
- return []int32{}, err
- }
- lines := strings.Split(string(out), "\n")
- ret := make([]int32, 0, len(lines))
- for _, l := range lines {
- if len(l) == 0 {
- continue
- }
- i, err := strconv.ParseInt(l, 10, 32)
- if err != nil {
- continue
- }
- ret = append(ret, int32(i))
- }
- return ret, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go
index a33c5f125a2b0..a4c15f6915d97 100644
--- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go
@@ -70,3 +70,61 @@ func SwapDevices() ([]*SwapDevice, error) {
func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) {
return nil, common.ErrNotImplementedError
}
+
+type vmStatisticsData struct {
+ freeCount uint32
+ activeCount uint32
+ inactiveCount uint32
+ wireCount uint32
+ _ [44]byte // Not used here
+}
+
+// VirtualMemory returns VirtualmemoryStat.
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ return VirtualMemoryWithContext(context.Background())
+}
+
+func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
+ machLib, err := common.NewLibrary(common.System)
+ if err != nil {
+ return nil, err
+ }
+ defer machLib.Close()
+
+ hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym)
+ machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym)
+
+ count := uint32(common.HOST_VM_INFO_COUNT)
+ var vmstat vmStatisticsData
+
+ status := hostStatistics(machHostSelf(), common.HOST_VM_INFO,
+ uintptr(unsafe.Pointer(&vmstat)), &count)
+
+ if status != common.KERN_SUCCESS {
+ return nil, fmt.Errorf("host_statistics error=%d", status)
+ }
+
+ pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size")
+ pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr))
+ total, err := getHwMemsize()
+ if err != nil {
+ return nil, err
+ }
+ totalCount := uint32(total / pageSize)
+
+ availableCount := vmstat.inactiveCount + vmstat.freeCount
+ usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount)
+
+ usedCount := totalCount - availableCount
+
+ return &VirtualMemoryStat{
+ Total: total,
+ Available: pageSize * uint64(availableCount),
+ Used: pageSize * uint64(usedCount),
+ UsedPercent: usedPercent,
+ Free: pageSize * uint64(vmstat.freeCount),
+ Active: pageSize * uint64(vmstat.activeCount),
+ Inactive: pageSize * uint64(vmstat.inactiveCount),
+ Wired: pageSize * uint64(vmstat.wireCount),
+ }, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go
deleted file mode 100644
index cc6657d045c13..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && cgo
-
-package mem
-
-/*
-#include
-#include
-*/
-import "C"
-
-import (
- "context"
- "fmt"
- "unsafe"
-)
-
-// VirtualMemory returns VirtualmemoryStat.
-func VirtualMemory() (*VirtualMemoryStat, error) {
- return VirtualMemoryWithContext(context.Background())
-}
-
-func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
- count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT)
- var vmstat C.vm_statistics_data_t
-
- status := C.host_statistics(C.host_t(C.mach_host_self()),
- C.HOST_VM_INFO,
- C.host_info_t(unsafe.Pointer(&vmstat)),
- &count)
-
- if status != C.KERN_SUCCESS {
- return nil, fmt.Errorf("host_statistics error=%d", status)
- }
-
- pageSize := uint64(C.vm_kernel_page_size)
- total, err := getHwMemsize()
- if err != nil {
- return nil, err
- }
- totalCount := C.natural_t(total / pageSize)
-
- availableCount := vmstat.inactive_count + vmstat.free_count
- usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount)
-
- usedCount := totalCount - availableCount
-
- return &VirtualMemoryStat{
- Total: total,
- Available: pageSize * uint64(availableCount),
- Used: pageSize * uint64(usedCount),
- UsedPercent: usedPercent,
- Free: pageSize * uint64(vmstat.free_count),
- Active: pageSize * uint64(vmstat.active_count),
- Inactive: pageSize * uint64(vmstat.inactive_count),
- Wired: pageSize * uint64(vmstat.wire_count),
- }, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go
deleted file mode 100644
index 097a93e63e4cc..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && !cgo
-
-package mem
-
-import (
- "context"
- "strconv"
- "strings"
-
- "golang.org/x/sys/unix"
-)
-
-// Runs vm_stat and returns Free and inactive pages
-func getVMStat(vms *VirtualMemoryStat) error {
- out, err := invoke.Command("vm_stat")
- if err != nil {
- return err
- }
- return parseVMStat(string(out), vms)
-}
-
-func parseVMStat(out string, vms *VirtualMemoryStat) error {
- var err error
-
- lines := strings.Split(out, "\n")
- pagesize := uint64(unix.Getpagesize())
- for _, line := range lines {
- fields := strings.Split(line, ":")
- if len(fields) < 2 {
- continue
- }
- key := strings.TrimSpace(fields[0])
- value := strings.Trim(fields[1], " .")
- switch key {
- case "Pages free":
- free, e := strconv.ParseUint(value, 10, 64)
- if e != nil {
- err = e
- }
- vms.Free = free * pagesize
- case "Pages inactive":
- inactive, e := strconv.ParseUint(value, 10, 64)
- if e != nil {
- err = e
- }
- vms.Inactive = inactive * pagesize
- case "Pages active":
- active, e := strconv.ParseUint(value, 10, 64)
- if e != nil {
- err = e
- }
- vms.Active = active * pagesize
- case "Pages wired down":
- wired, e := strconv.ParseUint(value, 10, 64)
- if e != nil {
- err = e
- }
- vms.Wired = wired * pagesize
- }
- }
- return err
-}
-
-// VirtualMemory returns VirtualmemoryStat.
-func VirtualMemory() (*VirtualMemoryStat, error) {
- return VirtualMemoryWithContext(context.Background())
-}
-
-func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) {
- ret := &VirtualMemoryStat{}
-
- total, err := getHwMemsize()
- if err != nil {
- return nil, err
- }
- err = getVMStat(ret)
- if err != nil {
- return nil, err
- }
-
- ret.Available = ret.Free + ret.Inactive
- ret.Total = total
-
- ret.Used = ret.Total - ret.Available
- ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total)
-
- return ret, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go
index d73f1f972fa99..70411c61644db 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go
@@ -18,7 +18,7 @@ import (
var (
invoke common.Invoker = common.Invoke{}
- ErrorNoChildren = errors.New("process does not have children")
+ ErrorNoChildren = errors.New("process does not have children") // Deprecated: ErrorNoChildren is never returned by process.Children(), check its returned []*Process slice length instead
ErrorProcessNotRunning = errors.New("process does not exist")
ErrorNotPermitted = errors.New("operation not permitted")
)
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
index 66b3684eae4af..05c7562b767c1 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go
@@ -4,15 +4,20 @@
package process
import (
+ "bytes"
"context"
+ "encoding/binary"
"fmt"
"path/filepath"
+ "runtime"
+ "sort"
"strconv"
"strings"
+ "unsafe"
- "github.com/tklauser/go-sysconf"
"golang.org/x/sys/unix"
+ "github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/internal/common"
"github.com/shirou/gopsutil/v4/net"
)
@@ -27,16 +32,6 @@ const (
KernProcPathname = 12 // path to executable
)
-var clockTicks = 100 // default value
-
-func init() {
- clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
- // ignore errors
- if err == nil {
- clockTicks = int(clkTck)
- }
-}
-
type _Ctype_struct___0 struct {
Pad uint64
}
@@ -186,65 +181,22 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e
return nil, common.ErrNotImplementedError
}
-func convertCPUTimes(s string) (ret float64, err error) {
- var t int
- var _tmp string
- if strings.Contains(s, ":") {
- _t := strings.Split(s, ":")
- switch len(_t) {
- case 3:
- hour, err := strconv.ParseInt(_t[0], 10, 32)
- if err != nil {
- return ret, err
- }
- t += int(hour) * 60 * 60 * clockTicks
-
- mins, err := strconv.ParseInt(_t[1], 10, 32)
- if err != nil {
- return ret, err
- }
- t += int(mins) * 60 * clockTicks
- _tmp = _t[2]
- case 2:
- mins, err := strconv.ParseInt(_t[0], 10, 32)
- if err != nil {
- return ret, err
- }
- t += int(mins) * 60 * clockTicks
- _tmp = _t[1]
- case 1, 0:
- _tmp = s
- default:
- return ret, fmt.Errorf("wrong cpu time string")
- }
- } else {
- _tmp = s
- }
-
- _t := strings.Split(_tmp, ".")
- if err != nil {
- return ret, err
- }
- h, err := strconv.ParseInt(_t[0], 10, 32)
- t += int(h) * clockTicks
- h, err = strconv.ParseInt(_t[1], 10, 32)
- t += int(h)
- return float64(t) / float64(clockTicks), nil
-}
-
func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
- pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ procs, err := ProcessesWithContext(ctx)
if err != nil {
- return nil, err
+ return nil, nil
}
- ret := make([]*Process, 0, len(pids))
- for _, pid := range pids {
- np, err := NewProcessWithContext(ctx, pid)
+ ret := make([]*Process, 0, len(procs))
+ for _, proc := range procs {
+ ppid, err := proc.PpidWithContext(ctx)
if err != nil {
- return nil, err
+ continue
+ }
+ if ppid == p.Pid {
+ ret = append(ret, proc)
}
- ret = append(ret, np)
}
+ sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid })
return ret, nil
}
@@ -323,3 +275,206 @@ func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption
return ret, nil
}
+
+var (
+ procPidPath common.ProcPidPathFunc
+ procPidInfo common.ProcPidInfoFunc
+ machTimeBaseInfo common.MachTimeBaseInfoFunc
+)
+
+func registerFuncs() (*common.Library, error) {
+ lib, err := common.NewLibrary(common.System)
+ if err != nil {
+ return nil, err
+ }
+
+ procPidPath = common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym)
+ procPidInfo = common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym)
+ machTimeBaseInfo = common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym)
+
+ return lib, nil
+}
+
+func getTimeScaleToNanoSeconds() float64 {
+ var timeBaseInfo common.MachTimeBaseInfo
+
+ machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo)))
+
+ return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom)
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return "", err
+ }
+ defer lib.Close()
+
+ buf := make([]byte, common.PROC_PIDPATHINFO_MAXSIZE)
+ ret := procPidPath(p.Pid, uintptr(unsafe.Pointer(&buf[0])), common.PROC_PIDPATHINFO_MAXSIZE)
+
+ if ret <= 0 {
+ return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret)
+ }
+
+ return common.GoString(&buf[0]), nil
+}
+
+// sys/proc_info.h
+type vnodePathInfo struct {
+ _ [152]byte
+ vipPath [common.MAXPATHLEN]byte
+ _ [1176]byte
+}
+
+// CwdWithContext retrieves the Current Working Directory for the given process.
+// It uses the proc_pidinfo from libproc and will only work for processes the
+// EUID can access. Otherwise "operation not permitted" will be returned as the
+// error.
+// Note: This might also work for other *BSD OSs.
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return "", err
+ }
+ defer lib.Close()
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ var vpi vnodePathInfo
+ const vpiSize = int32(unsafe.Sizeof(vpi))
+ ret := procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize)
+ errno, _ := lib.Dlsym("errno")
+ err = *(**unix.Errno)(unsafe.Pointer(&errno))
+ if err == unix.EPERM {
+ return "", ErrorNotPermitted
+ }
+
+ if ret <= 0 {
+ return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret)
+ }
+
+ if ret != vpiSize {
+ return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret)
+ }
+ return common.GoString(&vpi.vipPath[0]), nil
+}
+
+func procArgs(pid int32) ([]byte, int, error) {
+ procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid})
+ if err != nil {
+ return nil, 0, err
+ }
+ nargs := procargs[:4]
+ return procargs, int(binary.LittleEndian.Uint32(nargs)), nil
+}
+
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+ return p.cmdlineSliceWithContext(ctx, true)
+}
+
+func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) {
+ pargs, nargs, err := procArgs(p.Pid)
+ if err != nil {
+ return nil, err
+ }
+ // The first bytes hold the nargs int, skip it.
+ args := bytes.Split((pargs)[unsafe.Sizeof(int(0)):], []byte{0})
+ var argStr string
+ // The first element is the actual binary/command path.
+ // command := args[0]
+ var argSlice []string
+ // var envSlice []string
+ // All other, non-zero elements are arguments. The first "nargs" elements
+ // are the arguments. Everything else in the slice is then the environment
+ // of the process.
+ for _, arg := range args[1:] {
+ argStr = string(arg[:])
+ if len(argStr) > 0 {
+ if nargs > 0 {
+ argSlice = append(argSlice, argStr)
+ nargs--
+ continue
+ }
+ break
+ // envSlice = append(envSlice, argStr)
+ }
+ }
+ return argSlice, err
+}
+
+// cmdNameWithContext returns the command name (including spaces) without any arguments
+func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
+ r, err := p.cmdlineSliceWithContext(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(r) == 0 {
+ return "", nil
+ }
+
+ return r[0], err
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+ r, err := p.CmdlineSliceWithContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ return strings.Join(r, " "), err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return 0, err
+ }
+ defer lib.Close()
+
+ var ti ProcTaskInfo
+ const tiSize = int32(unsafe.Sizeof(ti))
+ procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize)
+
+ return int32(ti.Threadnum), nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return nil, err
+ }
+ defer lib.Close()
+
+ var ti ProcTaskInfo
+ const tiSize = int32(unsafe.Sizeof(ti))
+ procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize)
+
+ timescaleToNanoSeconds := getTimeScaleToNanoSeconds()
+ ret := &cpu.TimesStat{
+ CPU: "cpu",
+ User: float64(ti.Total_user) * timescaleToNanoSeconds / 1e9,
+ System: float64(ti.Total_system) * timescaleToNanoSeconds / 1e9,
+ }
+ return ret, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return nil, err
+ }
+ defer lib.Close()
+
+ var ti ProcTaskInfo
+ const tiSize = int32(unsafe.Sizeof(ti))
+ procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize)
+
+ ret := &MemoryInfoStat{
+ RSS: uint64(ti.Resident_size),
+ VMS: uint64(ti.Virtual_size),
+ Swap: uint64(ti.Pageins),
+ }
+ return ret, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go
index a13522473a1c3..890a5d5331a49 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go
@@ -212,6 +212,27 @@ type Posix_cred struct {
type Label struct{}
+type ProcTaskInfo struct {
+ Virtual_size uint64
+ Resident_size uint64
+ Total_user uint64
+ Total_system uint64
+ Threads_user uint64
+ Threads_system uint64
+ Policy int32
+ Faults int32
+ Pageins int32
+ Cow_faults int32
+ Messages_sent int32
+ Messages_received int32
+ Syscalls_mach int32
+ Syscalls_unix int32
+ Csw int32
+ Threadnum int32
+ Numrunning int32
+ Priority int32
+}
+
type AuditinfoAddr struct {
Auid uint32
Mask AuMask
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go
index f1f3df365d919..8075cf227d19a 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go
@@ -190,6 +190,27 @@ type Posix_cred struct{}
type Label struct{}
+type ProcTaskInfo struct {
+ Virtual_size uint64
+ Resident_size uint64
+ Total_user uint64
+ Total_system uint64
+ Threads_user uint64
+ Threads_system uint64
+ Policy int32
+ Faults int32
+ Pageins int32
+ Cow_faults int32
+ Messages_sent int32
+ Messages_received int32
+ Syscalls_mach int32
+ Syscalls_unix int32
+ Csw int32
+ Threadnum int32
+ Numrunning int32
+ Priority int32
+}
+
type AuditinfoAddr struct {
Auid uint32
Mask AuMask
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go
deleted file mode 100644
index bbdfc963ebbee..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && cgo
-
-package process
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-import "C"
-
-import (
- "bytes"
- "context"
- "fmt"
- "strings"
- "syscall"
- "unsafe"
-
- "github.com/shirou/gopsutil/v4/cpu"
-)
-
-var (
- argMax int
- timescaleToNanoSeconds float64
-)
-
-func init() {
- argMax = getArgMax()
- timescaleToNanoSeconds = getTimeScaleToNanoSeconds()
-}
-
-func getArgMax() int {
- var (
- mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX}
- argmax C.int
- size C.size_t = C.ulong(unsafe.Sizeof(argmax))
- )
- retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0)
- if retval == 0 {
- return int(argmax)
- }
- return 0
-}
-
-func getTimeScaleToNanoSeconds() float64 {
- var timeBaseInfo C.struct_mach_timebase_info
-
- C.mach_timebase_info(&timeBaseInfo)
-
- return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom)
-}
-
-func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
- var c C.char // need a var for unsafe.Sizeof need a var
- const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c)
- buffer := (*C.char)(C.malloc(C.size_t(bufsize)))
- defer C.free(unsafe.Pointer(buffer))
-
- ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize))
- if err != nil {
- return "", err
- }
- if ret <= 0 {
- return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret)
- }
-
- return C.GoString(buffer), nil
-}
-
-// CwdWithContext retrieves the Current Working Directory for the given process.
-// It uses the proc_pidinfo from libproc and will only work for processes the
-// EUID can access. Otherwise "operation not permitted" will be returned as the
-// error.
-// Note: This might also work for other *BSD OSs.
-func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
- const vpiSize = C.sizeof_struct_proc_vnodepathinfo
- vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize))
- defer C.free(unsafe.Pointer(vpi))
- ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize)
- if err != nil {
- // fmt.Printf("ret: %d %T\n", ret, err)
- if err == syscall.EPERM {
- return "", ErrorNotPermitted
- }
- return "", err
- }
- if ret <= 0 {
- return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret)
- }
- if ret != C.sizeof_struct_proc_vnodepathinfo {
- return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret)
- }
- return C.GoString(&vpi.pvi_cdir.vip_path[0]), err
-}
-
-func procArgs(pid int32) ([]byte, int, error) {
- var (
- mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}
- size C.size_t = C.ulong(argMax)
- nargs C.int
- result []byte
- )
- procargs := (*C.char)(C.malloc(C.ulong(argMax)))
- defer C.free(unsafe.Pointer(procargs))
- retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0)
- if retval == 0 {
- C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int)
- result = C.GoBytes(unsafe.Pointer(procargs), C.int(size))
- // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result))
- return result, int(nargs), nil
- }
- return nil, 0, err
-}
-
-func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
- return p.cmdlineSliceWithContext(ctx, true)
-}
-
-func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) {
- pargs, nargs, err := procArgs(p.Pid)
- if err != nil {
- return nil, err
- }
- // The first bytes hold the nargs int, skip it.
- args := bytes.Split((pargs)[C.sizeof_int:], []byte{0})
- var argStr string
- // The first element is the actual binary/command path.
- // command := args[0]
- var argSlice []string
- // var envSlice []string
- // All other, non-zero elements are arguments. The first "nargs" elements
- // are the arguments. Everything else in the slice is then the environment
- // of the process.
- for _, arg := range args[1:] {
- argStr = string(arg[:])
- if len(argStr) > 0 {
- if nargs > 0 {
- argSlice = append(argSlice, argStr)
- nargs--
- continue
- }
- break
- // envSlice = append(envSlice, argStr)
- }
- }
- return argSlice, err
-}
-
-// cmdNameWithContext returns the command name (including spaces) without any arguments
-func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
- r, err := p.cmdlineSliceWithContext(ctx, false)
- if err != nil {
- return "", err
- }
-
- if len(r) == 0 {
- return "", nil
- }
-
- return r[0], err
-}
-
-func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
- r, err := p.CmdlineSliceWithContext(ctx)
- if err != nil {
- return "", err
- }
- return strings.Join(r, " "), err
-}
-
-func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
- const tiSize = C.sizeof_struct_proc_taskinfo
- ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
- defer C.free(unsafe.Pointer(ti))
-
- _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
- if err != nil {
- return 0, err
- }
-
- return int32(ti.pti_threadnum), nil
-}
-
-func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
- const tiSize = C.sizeof_struct_proc_taskinfo
- ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
- defer C.free(unsafe.Pointer(ti))
-
- _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
- if err != nil {
- return nil, err
- }
-
- ret := &cpu.TimesStat{
- CPU: "cpu",
- User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9,
- System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9,
- }
- return ret, nil
-}
-
-func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
- const tiSize = C.sizeof_struct_proc_taskinfo
- ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize))
- defer C.free(unsafe.Pointer(ti))
-
- _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize)
- if err != nil {
- return nil, err
- }
-
- ret := &MemoryInfoStat{
- RSS: uint64(ti.pti_resident_size),
- VMS: uint64(ti.pti_virtual_size),
- Swap: uint64(ti.pti_pageins),
- }
- return ret, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go
deleted file mode 100644
index d498c9377a060..0000000000000
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-//go:build darwin && !cgo
-
-package process
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/shirou/gopsutil/v4/cpu"
- "github.com/shirou/gopsutil/v4/internal/common"
-)
-
-func (p *Process) CwdWithContext(ctx context.Context) (string, error) {
- return "", common.ErrNotImplementedError
-}
-
-func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
- out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn")
- if err != nil {
- return "", fmt.Errorf("bad call to lsof: %w", err)
- }
- txtFound := 0
- lines := strings.Split(string(out), "\n")
- fallback := ""
- for i := 1; i < len(lines); i++ {
- if lines[i] == "ftxt" {
- txtFound++
- if txtFound == 1 {
- fallback = lines[i-1][1:]
- }
- if txtFound == 2 {
- return lines[i-1][1:], nil
- }
- }
- }
- if fallback != "" {
- return fallback, nil
- }
- return "", fmt.Errorf("missing txt data returned by lsof")
-}
-
-func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
- r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
- if err != nil {
- return "", err
- }
- return strings.Join(r[0], " "), err
-}
-
-func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
- r, err := callPsWithContext(ctx, "command", p.Pid, false, true)
- if err != nil {
- return "", err
- }
- if len(r) > 0 && len(r[0]) > 0 {
- return r[0][0], err
- }
-
- return "", err
-}
-
-// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each
-// element being an argument. Because of current deficiencies in the way that the command
-// line arguments are found, single arguments that have spaces in the will actually be
-// reported as two separate items. In order to do something better CGO would be needed
-// to use the native darwin functions.
-func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
- r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
- if err != nil {
- return nil, err
- }
- return r[0], err
-}
-
-func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
- r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false)
- if err != nil {
- return 0, err
- }
- return int32(len(r)), nil
-}
-
-func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
- r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false)
- if err != nil {
- return nil, err
- }
-
- utime, err := convertCPUTimes(r[0][0])
- if err != nil {
- return nil, err
- }
- stime, err := convertCPUTimes(r[0][1])
- if err != nil {
- return nil, err
- }
-
- ret := &cpu.TimesStat{
- CPU: "cpu",
- User: utime,
- System: stime,
- }
- return ret, nil
-}
-
-func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
- r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false)
- if err != nil {
- return nil, err
- }
- rss, err := strconv.ParseInt(r[0][0], 10, 64)
- if err != nil {
- return nil, err
- }
- vms, err := strconv.ParseInt(r[0][1], 10, 64)
- if err != nil {
- return nil, err
- }
- pagein, err := strconv.ParseInt(r[0][2], 10, 64)
- if err != nil {
- return nil, err
- }
-
- ret := &MemoryInfoStat{
- RSS: uint64(rss) * 1024,
- VMS: uint64(vms) * 1024,
- Swap: uint64(pagein),
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go
index 436dcf0300531..76373736bfc5b 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go
@@ -8,6 +8,7 @@ import (
"context"
"errors"
"path/filepath"
+ "sort"
"strconv"
"strings"
@@ -269,18 +270,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e
}
func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
- pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ procs, err := ProcessesWithContext(ctx)
if err != nil {
- return nil, err
+ return nil, nil
}
- ret := make([]*Process, 0, len(pids))
- for _, pid := range pids {
- np, err := NewProcessWithContext(ctx, pid)
+ ret := make([]*Process, 0, len(procs))
+ for _, proc := range procs {
+ ppid, err := proc.PpidWithContext(ctx)
if err != nil {
- return nil, err
+ continue
+ }
+ if ppid == p.Pid {
+ ret = append(ret, proc)
}
- ret = append(ret, np)
}
+ sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid })
return ret, nil
}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go
index 7aff0448dfedf..68a8c88c4a904 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go
@@ -12,6 +12,7 @@ import (
"math"
"os"
"path/filepath"
+ "sort"
"strconv"
"strings"
@@ -338,21 +339,34 @@ func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, e
}
func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
- pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ statFiles, err := filepath.Glob(common.HostProcWithContext(ctx, "[0-9]*/stat"))
if err != nil {
return nil, err
}
- if len(pids) == 0 {
- return nil, ErrorNoChildren
- }
- ret := make([]*Process, 0, len(pids))
- for _, pid := range pids {
- np, err := NewProcessWithContext(ctx, pid)
+ ret := make([]*Process, 0, len(statFiles))
+ for _, statFile := range statFiles {
+ statContents, err := os.ReadFile(statFile)
if err != nil {
- return nil, err
+ continue
+ }
+ fields := splitProcStat(statContents)
+ pid, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ continue
+ }
+ ppid, err := strconv.ParseInt(fields[4], 10, 32)
+ if err != nil {
+ continue
+ }
+ if int32(ppid) == p.Pid {
+ np, err := NewProcessWithContext(ctx, int32(pid))
+ if err != nil {
+ continue
+ }
+ ret = append(ret, np)
}
- ret = append(ret, np)
}
+ sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid })
return ret, nil
}
@@ -1082,8 +1096,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui
if err != nil {
return 0, 0, nil, 0, 0, 0, nil, err
}
- ctime := (t / uint64(clockTicks)) + uint64(bootTime)
- createTime := int64(ctime * 1000)
+ createTime := int64((t * 1000 / uint64(clockTicks)) + uint64(bootTime*1000))
rtpriority, err := strconv.ParseInt(fields[18], 10, 32)
if err != nil {
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go
index e2d0ab462263d..5e8a9e0b45ee4 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go
@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"path/filepath"
+ "sort"
"strconv"
"strings"
"unsafe"
@@ -286,18 +287,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e
}
func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
- pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
+ procs, err := ProcessesWithContext(ctx)
if err != nil {
- return nil, err
+ return nil, nil
}
- ret := make([]*Process, 0, len(pids))
- for _, pid := range pids {
- np, err := NewProcessWithContext(ctx, pid)
+ ret := make([]*Process, 0, len(procs))
+ for _, proc := range procs {
+ ppid, err := proc.PpidWithContext(ctx)
if err != nil {
- return nil, err
+ continue
+ }
+ if ppid == p.Pid {
+ ret = append(ret, proc)
}
- ret = append(ret, np)
}
+ sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid })
return ret, nil
}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go
index 52e1086f7805d..b00c671e9f325 100644
--- a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go
@@ -43,6 +43,7 @@ var (
procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass")
procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters")
procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo")
+ procGetProcessHandleCount = common.Modkernel32.NewProc("GetProcessHandleCount")
processorArchitecture uint
)
@@ -548,8 +549,21 @@ func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitche
return nil, common.ErrNotImplementedError
}
+// NumFDsWithContext returns the number of handles for a process on Windows,
+// not the number of file descriptors (FDs).
func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) {
- return 0, common.ErrNotImplementedError
+ handle, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid))
+ if err != nil {
+ return 0, err
+ }
+ defer windows.CloseHandle(handle)
+
+ var handleCount uint32
+ ret, _, err := procGetProcessHandleCount.Call(uintptr(handle), uintptr(unsafe.Pointer(&handleCount)))
+ if ret == 0 {
+ return 0, err
+ }
+ return int32(handleCount), nil
}
func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml
deleted file mode 100644
index dc6fefb979ec0..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-run:
- timeout: 5m
-linters:
- enable:
- - gofmt
- - errcheck
- - errname
- - errorlint
- - bodyclose
- - durationcheck
- - whitespace
-
diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE
deleted file mode 100644
index e87a115e462e1..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the terms of
- a Secondary License.
-
-1.6. "Executable Form"
-
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
- means a work that combines Covered Software with other material, in a
- separate file or files, that is not Covered Software.
-
-1.8. "License"
-
- means this document.
-
-1.9. "Licensable"
-
- means having the right to grant, to the maximum extent possible, whether
- at the time of the initial grant or subsequently, any and all of the
- rights conveyed by this License.
-
-1.10. "Modifications"
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the License,
- by the making, using, selling, offering for sale, having made, import,
- or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, "control" means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution
- become effective for each Contribution on the date the Contributor first
- distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under
- this License. No additional rights or licenses will be implied from the
- distribution or licensing of Covered Software under this License.
- Notwithstanding Section 2.1(b) above, no patent license is granted by a
- Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
- This License does not grant any rights in the trademarks, service marks,
- or logos of any Contributor (except as may be necessary to comply with
- the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this
- License (see Section 10.2) or under the terms of a Secondary License (if
- permitted under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its
- Contributions are its original creation(s) or it has sufficient rights to
- grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under
- applicable copyright doctrines of fair use, fair dealing, or other
- equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under
- the terms of this License. You must inform recipients that the Source
- Code Form of the Covered Software is governed by the terms of this
- License, and how they can obtain a copy of this License. You may not
- attempt to alter or restrict the recipients' rights in the Source Code
- Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter the
- recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for
- the Covered Software. If the Larger Work is a combination of Covered
- Software with a work governed by one or more Secondary Licenses, and the
- Covered Software is not Incompatible With Secondary Licenses, this
- License permits You to additionally distribute such Covered Software
- under the terms of such Secondary License(s), so that the recipient of
- the Larger Work may, at their option, further distribute the Covered
- Software under the terms of either this License or such Secondary
- License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices
- (including copyright notices, patent notices, disclaimers of warranty, or
- limitations of liability) contained within the Source Code Form of the
- Covered Software, except that You may alter any license notices to the
- extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on
- behalf of any Contributor. You must make it absolutely clear that any
- such warranty, support, indemnity, or liability obligation is offered by
- You alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute,
- judicial order, or regulation then You must: (a) comply with the terms of
- this License to the maximum extent possible; and (b) describe the
- limitations and the code they affect. Such description must be placed in a
- text file included with all distributions of the Covered Software under
- this License. Except to the extent prohibited by statute or regulation,
- such description must be sufficiently detailed for a recipient of ordinary
- skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing
- basis, if such Contributor fails to notify You of the non-compliance by
- some reasonable means prior to 60 days after You have come back into
- compliance. Moreover, Your grants from a particular Contributor are
- reinstated on an ongoing basis if such Contributor notifies You of the
- non-compliance by some reasonable means, this is the first time You have
- received notice of non-compliance with this License from such
- Contributor, and You become compliant prior to 30 days after Your receipt
- of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions,
- counter-claims, and cross-claims) alleging that a Contributor Version
- directly or indirectly infringes any patent, then the rights granted to
- You by any and all Contributors for the Covered Software under Section
- 2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an "as is" basis,
- without warranty of any kind, either expressed, implied, or statutory,
- including, without limitation, warranties that the Covered Software is free
- of defects, merchantable, fit for a particular purpose or non-infringing.
- The entire risk as to the quality and performance of the Covered Software
- is with You. Should any Covered Software prove defective in any respect,
- You (not any Contributor) assume the cost of any necessary servicing,
- repair, or correction. This disclaimer of warranty constitutes an essential
- part of this License. No use of any Covered Software is authorized under
- this License except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from
- such party's negligence to the extent applicable law prohibits such
- limitation. Some jurisdictions do not allow the exclusion or limitation of
- incidental or consequential damages, so this exclusion and limitation may
- not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts
- of a jurisdiction where the defendant maintains its principal place of
- business and such litigation shall be governed by laws of that
- jurisdiction, without reference to its conflict-of-law provisions. Nothing
- in this Section shall prevent a party's ability to bring cross-claims or
- counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. Any law or regulation which provides that
- the language of a contract shall be construed against the drafter shall not
- be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version
- of the License under which You originally received the Covered Software,
- or under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a
- modified version of this License if you rename the license and remove
- any references to the name of the license steward (except to note that
- such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
- Licenses If You choose to distribute Source Code Form that is
- Incompatible With Secondary Licenses under the terms of this version of
- the License, the notice described in Exhibit B of this License must be
- attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
- This Source Code Form is "Incompatible
- With Secondary Licenses", as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile
deleted file mode 100644
index 28d786397d4b4..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-SHELL = bash
-
-default: test
-
-.PHONY: test
-test:
- @echo "--> Running Tests ..."
- @go test -v -race ./...
-
-vet:
- @echo "--> Vet Go sources ..."
- @go vet ./...
diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md
deleted file mode 100644
index 399657acf861c..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# m1cpu
-
-[](https://pkg.go.dev/github.com/shoenig/go-m1cpu)
-[](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE)
-[](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml)
-
-The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go.
-
-Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs.
-
-# Install
-
-```shell
-go get github.com/shoenig/go-m1cpu@latest
-```
-
-# CGO
-
-This package requires the use of [CGO](https://go.dev/blog/cgo).
-
-Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc)
-framework, which is accessible only through system C libraries.
-
-# Example
-
-Simple Go program to print Apple Silicon M1/M2 CPU speeds.
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/shoenig/go-m1cpu"
-)
-
-func main() {
- fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon())
-
- fmt.Println("pCore GHz", m1cpu.PCoreGHz())
- fmt.Println("eCore GHz", m1cpu.ECoreGHz())
-
- fmt.Println("pCore Hz", m1cpu.PCoreHz())
- fmt.Println("eCore Hz", m1cpu.ECoreHz())
-}
-```
-
-Using `go test` to print out available information.
-
-```
-➜ go test -v -run Show
-=== RUN Test_Show
- cpu_test.go:42: pCore Hz 3504000000
- cpu_test.go:43: eCore Hz 2424000000
- cpu_test.go:44: pCore GHz 3.504
- cpu_test.go:45: eCore GHz 2.424
- cpu_test.go:46: pCore count 8
- cpu_test.go:47: eCoreCount 4
- cpu_test.go:50: pCore Caches 196608 131072 16777216
- cpu_test.go:53: eCore Caches 131072 65536 4194304
---- PASS: Test_Show (0.00s)
-```
-
-# License
-
-Open source under the [MPL](LICENSE)
diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go
deleted file mode 100644
index 502a8cce92e67..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/cpu.go
+++ /dev/null
@@ -1,213 +0,0 @@
-//go:build darwin && arm64 && cgo
-
-package m1cpu
-
-// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit
-// #include
-// #include
-// #include
-// #include
-//
-// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0
-// #define kIOMainPortDefault kIOMasterPortDefault
-// #endif
-//
-// #define HzToGHz(hz) ((hz) / 1000000000.0)
-//
-// UInt64 global_pCoreHz;
-// UInt64 global_eCoreHz;
-// int global_pCoreCount;
-// int global_eCoreCount;
-// int global_pCoreL1InstCacheSize;
-// int global_eCoreL1InstCacheSize;
-// int global_pCoreL1DataCacheSize;
-// int global_eCoreL1DataCacheSize;
-// int global_pCoreL2CacheSize;
-// int global_eCoreL2CacheSize;
-// char global_brand[32];
-//
-// UInt64 getFrequency(CFTypeRef typeRef) {
-// CFDataRef cfData = typeRef;
-//
-// CFIndex size = CFDataGetLength(cfData);
-// UInt8 buf[size];
-// CFDataGetBytes(cfData, CFRangeMake(0, size), buf);
-//
-// UInt8 b1 = buf[size-5];
-// UInt8 b2 = buf[size-6];
-// UInt8 b3 = buf[size-7];
-// UInt8 b4 = buf[size-8];
-//
-// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4));
-// return pCoreHz;
-// }
-//
-// int sysctl_int(const char * name) {
-// int value = -1;
-// size_t size = 8;
-// sysctlbyname(name, &value, &size, NULL, 0);
-// return value;
-// }
-//
-// void sysctl_string(const char * name, char * dest) {
-// size_t size = 32;
-// sysctlbyname(name, dest, &size, NULL, 0);
-// }
-//
-// void initialize() {
-// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu");
-// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu");
-// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize");
-// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize");
-// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize");
-// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize");
-// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize");
-// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize");
-// sysctl_string("machdep.cpu.brand_string", global_brand);
-//
-// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice");
-// io_iterator_t iter;
-// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter);
-//
-// const size_t bufsize = 512;
-// io_object_t obj;
-// while ((obj = IOIteratorNext(iter))) {
-// char class[bufsize];
-// IOObjectGetClass(obj, class);
-// char name[bufsize];
-// IORegistryEntryGetName(obj, name);
-//
-// if (strncmp(name, "pmgr", bufsize) == 0) {
-// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0);
-// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0);
-//
-// long long pCoreHz = getFrequency(pCoreRef);
-// long long eCoreHz = getFrequency(eCoreRef);
-//
-// global_pCoreHz = pCoreHz;
-// global_eCoreHz = eCoreHz;
-// return;
-// }
-// }
-// }
-//
-// UInt64 eCoreHz() {
-// return global_eCoreHz;
-// }
-//
-// UInt64 pCoreHz() {
-// return global_pCoreHz;
-// }
-//
-// Float64 eCoreGHz() {
-// return HzToGHz(global_eCoreHz);
-// }
-//
-// Float64 pCoreGHz() {
-// return HzToGHz(global_pCoreHz);
-// }
-//
-// int pCoreCount() {
-// return global_pCoreCount;
-// }
-//
-// int eCoreCount() {
-// return global_eCoreCount;
-// }
-//
-// int pCoreL1InstCacheSize() {
-// return global_pCoreL1InstCacheSize;
-// }
-//
-// int pCoreL1DataCacheSize() {
-// return global_pCoreL1DataCacheSize;
-// }
-//
-// int pCoreL2CacheSize() {
-// return global_pCoreL2CacheSize;
-// }
-//
-// int eCoreL1InstCacheSize() {
-// return global_eCoreL1InstCacheSize;
-// }
-//
-// int eCoreL1DataCacheSize() {
-// return global_eCoreL1DataCacheSize;
-// }
-//
-// int eCoreL2CacheSize() {
-// return global_eCoreL2CacheSize;
-// }
-//
-// char * modelName() {
-// return global_brand;
-// }
-import "C"
-
-func init() {
- C.initialize()
-}
-
-// IsAppleSilicon returns true on this platform.
-func IsAppleSilicon() bool {
- return true
-}
-
-// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU.
-func PCoreHz() uint64 {
- return uint64(C.pCoreHz())
-}
-
-// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU.
-func ECoreHz() uint64 {
- return uint64(C.eCoreHz())
-}
-
-// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU.
-func PCoreGHz() float64 {
- return float64(C.pCoreGHz())
-}
-
-// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU.
-func ECoreGHz() float64 {
- return float64(C.eCoreGHz())
-}
-
-// PCoreCount returns the number of physical P (performance) cores.
-func PCoreCount() int {
- return int(C.pCoreCount())
-}
-
-// ECoreCount returns the number of physical E (efficiency) cores.
-func ECoreCount() int {
- return int(C.eCoreCount())
-}
-
-// PCoreCacheSize returns the sizes of the P (performance) core cache sizes
-// in the order of
-//
-// - L1 instruction cache
-// - L1 data cache
-// - L2 cache
-func PCoreCache() (int, int, int) {
- return int(C.pCoreL1InstCacheSize()),
- int(C.pCoreL1DataCacheSize()),
- int(C.pCoreL2CacheSize())
-}
-
-// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes
-// in the order of
-//
-// - L1 instruction cache
-// - L1 data cache
-// - L2 cache
-func ECoreCache() (int, int, int) {
- return int(C.eCoreL1InstCacheSize()),
- int(C.eCoreL1DataCacheSize()),
- int(C.eCoreL2CacheSize())
-}
-
-// ModelName returns the model name of the CPU.
-func ModelName() string {
- return C.GoString(C.modelName())
-}
diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go
deleted file mode 100644
index d425025aa84d5..0000000000000
--- a/vendor/github.com/shoenig/go-m1cpu/incompatible.go
+++ /dev/null
@@ -1,53 +0,0 @@
-//go:build !darwin || !arm64 || !cgo
-
-package m1cpu
-
-// IsAppleSilicon return false on this platform.
-func IsAppleSilicon() bool {
- return false
-}
-
-// PCoreHZ requires darwin/arm64
-func PCoreHz() uint64 {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// ECoreHZ requires darwin/arm64
-func ECoreHz() uint64 {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// PCoreGHz requires darwin/arm64
-func PCoreGHz() float64 {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// ECoreGHz requires darwin/arm64
-func ECoreGHz() float64 {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// PCoreCount requires darwin/arm64
-func PCoreCount() int {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// ECoreCount requires darwin/arm64
-func ECoreCount() int {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// PCoreCacheSize requires darwin/arm64
-func PCoreCache() (int, int, int) {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// ECoreCacheSize requires darwin/arm64
-func ECoreCache() (int, int, int) {
- panic("m1cpu: not a darwin/arm64 system")
-}
-
-// ModelName requires darwin/arm64
-func ModelName() string {
- panic("m1cpu: not a darwin/arm64 system")
-}
diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md
index 6aae677ac289f..4686a47002b4d 100644
--- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md
+++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md
@@ -9,6 +9,8 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan
We use *breaking :warning:* to mark changes that are not backward compatible (relates only to v0.y.z releases.)
## Unreleased
+- [#38](https://github.com/thanos-io/objstore/pull/38) GCS: Upgrade cloud.google.com/go/storage version to `v1.43.0`.
+- [#145](https://github.com/thanos-io/objstore/pull/145) Include content length in the response of Get and GetRange.
### Fixed
- [#117](https://github.com/thanos-io/objstore/pull/117) Metrics: Fix `objstore_bucket_operation_failures_total` incorrectly incremented if context is cancelled while reading object contents.
diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go
index 3f6f35e94e770..ed256c9cd9de8 100644
--- a/vendor/github.com/thanos-io/objstore/inmem.go
+++ b/vendor/github.com/thanos-io/objstore/inmem.go
@@ -119,7 +119,12 @@ func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error)
return nil, errNotFound
}
- return io.NopCloser(bytes.NewReader(file)), nil
+ return ObjectSizerReadCloser{
+ ReadCloser: io.NopCloser(bytes.NewReader(file)),
+ Size: func() (int64, error) {
+ return int64(len(file)), nil
+ },
+ }, nil
}
// GetRange returns a new range reader for the given object name and range.
@@ -136,15 +141,27 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64
}
if int64(len(file)) < off {
- return io.NopCloser(bytes.NewReader(nil)), nil
+ return ObjectSizerReadCloser{
+ ReadCloser: io.NopCloser(bytes.NewReader(nil)),
+ Size: func() (int64, error) { return 0, nil },
+ }, nil
}
if length == -1 {
- return io.NopCloser(bytes.NewReader(file[off:])), nil
+ return ObjectSizerReadCloser{
+ ReadCloser: io.NopCloser(bytes.NewReader(file[off:])),
+ Size: func() (int64, error) {
+ return int64(len(file[off:])), nil
+ },
+ }, nil
}
if length <= 0 {
- return io.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0")
+ // wrap with ObjectSizerReadCloser to return 0 size.
+ return ObjectSizerReadCloser{
+ ReadCloser: io.NopCloser(bytes.NewReader(nil)),
+ Size: func() (int64, error) { return 0, nil },
+ }, errors.New("length cannot be smaller or equal 0")
}
if int64(len(file)) <= off+length {
@@ -152,7 +169,12 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64
length = int64(len(file)) - off
}
- return io.NopCloser(bytes.NewReader(file[off : off+length])), nil
+ return ObjectSizerReadCloser{
+ ReadCloser: io.NopCloser(bytes.NewReader(file[off : off+length])),
+ Size: func() (int64, error) {
+ return length, nil
+ },
+ }, nil
}
// Exists checks if the given directory exists in memory.
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go
index 87ec9e9863561..5bce3ef660f53 100644
--- a/vendor/github.com/thanos-io/objstore/objstore.go
+++ b/vendor/github.com/thanos-io/objstore/objstore.go
@@ -566,14 +566,18 @@ func (b *metricBucket) Get(ctx context.Context, name string) (io.ReadCloser, err
const op = OpGet
b.metrics.ops.WithLabelValues(op).Inc()
+ start := time.Now()
+
rc, err := b.bkt.Get(ctx, name)
if err != nil {
if !b.metrics.isOpFailureExpected(err) && ctx.Err() != context.Canceled {
b.metrics.opsFailures.WithLabelValues(op).Inc()
}
+ b.metrics.opsDuration.WithLabelValues(op).Observe(float64(time.Since(start)))
return nil, err
}
return newTimingReader(
+ start,
rc,
true,
op,
@@ -589,14 +593,18 @@ func (b *metricBucket) GetRange(ctx context.Context, name string, off, length in
const op = OpGetRange
b.metrics.ops.WithLabelValues(op).Inc()
+ start := time.Now()
+
rc, err := b.bkt.GetRange(ctx, name, off, length)
if err != nil {
if !b.metrics.isOpFailureExpected(err) && ctx.Err() != context.Canceled {
b.metrics.opsFailures.WithLabelValues(op).Inc()
}
+ b.metrics.opsDuration.WithLabelValues(op).Observe(float64(time.Since(start)))
return nil, err
}
return newTimingReader(
+ start,
rc,
true,
op,
@@ -628,7 +636,10 @@ func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) err
const op = OpUpload
b.metrics.ops.WithLabelValues(op).Inc()
+ start := time.Now()
+
trc := newTimingReader(
+ start,
r,
false,
op,
@@ -705,7 +716,7 @@ type timingReader struct {
transferredBytes *prometheus.HistogramVec
}
-func newTimingReader(r io.Reader, closeReader bool, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc, fetchedBytes *prometheus.CounterVec, transferredBytes *prometheus.HistogramVec) io.ReadCloser {
+func newTimingReader(start time.Time, r io.Reader, closeReader bool, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc, fetchedBytes *prometheus.CounterVec, transferredBytes *prometheus.HistogramVec) io.ReadCloser {
// Initialize the metrics with 0.
dur.WithLabelValues(op)
failed.WithLabelValues(op)
@@ -716,7 +727,7 @@ func newTimingReader(r io.Reader, closeReader bool, op string, dur *prometheus.H
closeReader: closeReader,
objSize: objSize,
objSizeErr: objSizeErr,
- start: time.Now(),
+ start: start,
op: op,
duration: dur,
failed: failed,
@@ -728,7 +739,6 @@ func newTimingReader(r io.Reader, closeReader bool, op string, dur *prometheus.H
_, isSeeker := r.(io.Seeker)
_, isReaderAt := r.(io.ReaderAt)
-
if isSeeker && isReaderAt {
// The assumption is that in most cases when io.ReaderAt() is implemented then
// io.Seeker is implemented too (e.g. os.File).
@@ -737,6 +747,9 @@ func newTimingReader(r io.Reader, closeReader bool, op string, dur *prometheus.H
if isSeeker {
return &timingReaderSeeker{timingReader: trc}
}
+ if _, isWriterTo := r.(io.WriterTo); isWriterTo {
+ return &timingReaderWriterTo{timingReader: trc}
+ }
return &trc
}
@@ -772,11 +785,16 @@ func (r *timingReader) Close() error {
func (r *timingReader) Read(b []byte) (n int, err error) {
n, err = r.Reader.Read(b)
+ r.updateMetrics(n, err)
+ return n, err
+}
+
+func (r *timingReader) updateMetrics(n int, err error) {
if r.fetchedBytes != nil {
r.fetchedBytes.WithLabelValues(r.op).Add(float64(n))
}
-
r.readBytes += int64(n)
+
// Report metric just once.
if !r.alreadyGotErr && err != nil && err != io.EOF {
if !r.isFailureExpected(err) && !errors.Is(err, context.Canceled) {
@@ -784,7 +802,6 @@ func (r *timingReader) Read(b []byte) (n int, err error) {
}
r.alreadyGotErr = true
}
- return n, err
}
type timingReaderSeeker struct {
@@ -802,3 +819,27 @@ type timingReaderSeekerReaderAt struct {
func (rsc *timingReaderSeekerReaderAt) ReadAt(p []byte, off int64) (int, error) {
return (rsc.Reader).(io.ReaderAt).ReadAt(p, off)
}
+
+type timingReaderWriterTo struct {
+ timingReader
+}
+
+func (t *timingReaderWriterTo) WriteTo(w io.Writer) (n int64, err error) {
+ n, err = (t.Reader).(io.WriterTo).WriteTo(w)
+ t.timingReader.updateMetrics(int(n), err)
+ return n, err
+}
+
+type ObjectSizerReadCloser struct {
+ io.ReadCloser
+ Size func() (int64, error)
+}
+
+// ObjectSize implement ObjectSizer.
+func (o ObjectSizerReadCloser) ObjectSize() (int64, error) {
+ if o.Size == nil {
+ return 0, errors.New("unknown size")
+ }
+
+ return o.Size()
+}
diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
index 63bd2c1aadb1d..9a4e8518e3d00 100644
--- a/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
+++ b/vendor/github.com/thanos-io/objstore/providers/azure/azure.go
@@ -6,6 +6,7 @@ package azure
import (
"context"
"io"
+ "net/http"
"os"
"strings"
"testing"
@@ -145,7 +146,7 @@ type Bucket struct {
}
// NewBucket returns a new Bucket using the provided Azure config.
-func NewBucket(logger log.Logger, azureConfig []byte, component string) (*Bucket, error) {
+func NewBucket(logger log.Logger, azureConfig []byte, component string, rt http.RoundTripper) (*Bucket, error) {
level.Debug(logger).Log("msg", "creating new Azure bucket connection", "component", component)
conf, err := parseConfig(azureConfig)
if err != nil {
@@ -154,11 +155,14 @@ func NewBucket(logger log.Logger, azureConfig []byte, component string) (*Bucket
if conf.MSIResource != "" {
level.Warn(logger).Log("msg", "The field msi_resource has been deprecated and should no longer be set")
}
- return NewBucketWithConfig(logger, conf, component)
+ return NewBucketWithConfig(logger, conf, component, rt)
}
// NewBucketWithConfig returns a new Bucket using the provided Azure config struct.
-func NewBucketWithConfig(logger log.Logger, conf Config, component string) (*Bucket, error) {
+func NewBucketWithConfig(logger log.Logger, conf Config, component string, rt http.RoundTripper) (*Bucket, error) {
+ if rt != nil {
+ conf.HTTPConfig.Transport = rt
+ }
if err := conf.validate(); err != nil {
return nil, err
}
@@ -269,7 +273,13 @@ func (b *Bucket) getBlobReader(ctx context.Context, name string, httpRange blob.
return nil, errors.Wrapf(err, "cannot download blob, address: %s", blobClient.URL())
}
retryOpts := azblob.RetryReaderOptions{MaxRetries: int32(b.readerMaxRetries)}
- return resp.NewRetryReader(ctx, &retryOpts), nil
+
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: resp.NewRetryReader(ctx, &retryOpts),
+ Size: func() (int64, error) {
+ return *resp.ContentLength, nil
+ },
+ }, nil
}
// Get returns a reader for the given object name.
@@ -355,7 +365,7 @@ func NewTestBucket(t testing.TB, component string) (objstore.Bucket, func(), err
if err != nil {
return nil, nil, err
}
- bkt, err := NewBucket(log.NewNopLogger(), bc, component)
+ bkt, err := NewBucket(log.NewNopLogger(), bc, component, nil)
if err != nil {
t.Errorf("Cannot create Azure storage container:")
return nil, nil, err
diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go
index 846394a08a747..2915fbbbc8384 100644
--- a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go
+++ b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go
@@ -20,10 +20,14 @@ import (
const DirDelim = "/"
func getContainerClient(conf Config) (*container.Client, error) {
- dt, err := exthttp.DefaultTransport(conf.HTTPConfig)
+ var rt http.RoundTripper
+ rt, err := exthttp.DefaultTransport(conf.HTTPConfig)
if err != nil {
return nil, err
}
+ if conf.HTTPConfig.Transport != nil {
+ rt = conf.HTTPConfig.Transport
+ }
opt := &container.ClientOptions{
ClientOptions: azcore.ClientOptions{
Retry: policy.RetryOptions{
@@ -35,7 +39,7 @@ func getContainerClient(conf Config) (*container.Client, error) {
Telemetry: policy.TelemetryOptions{
ApplicationID: "Thanos",
},
- Transport: &http.Client{Transport: dt},
+ Transport: &http.Client{Transport: rt},
},
}
diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
index 21c7048505456..2ed42ee8b64a3 100644
--- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
+++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
@@ -150,8 +150,12 @@ func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (
return nil, errors.New("object name is empty")
}
- file := filepath.Join(b.rootDir, name)
- if _, err := os.Stat(file); err != nil {
+ var (
+ file = filepath.Join(b.rootDir, name)
+ stat os.FileInfo
+ err error
+ )
+ if stat, err = os.Stat(file); err != nil {
return nil, errors.Wrapf(err, "stat %s", file)
}
@@ -160,18 +164,33 @@ func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (
return nil, err
}
+ var newOffset int64
if off > 0 {
- _, err := f.Seek(off, 0)
+ newOffset, err = f.Seek(off, 0)
if err != nil {
return nil, errors.Wrapf(err, "seek %v", off)
}
}
+ size := stat.Size() - newOffset
if length == -1 {
- return f, nil
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: f,
+ Size: func() (int64, error) {
+ return size, nil
+ },
+ }, nil
}
- return &rangeReaderCloser{Reader: io.LimitReader(f, length), f: f}, nil
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: &rangeReaderCloser{
+ Reader: io.LimitReader(f, length),
+ f: f,
+ },
+ Size: func() (int64, error) {
+ return min(length, size), nil
+ },
+ }, nil
}
// Exists checks if the given directory exists in memory.
diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
index a697e597f043e..e022b14fffffa 100644
--- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
+++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
@@ -22,9 +22,7 @@ import (
"google.golang.org/api/iterator"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/experimental"
"google.golang.org/grpc/status"
"gopkg.in/yaml.v2"
@@ -54,7 +52,8 @@ type Config struct {
// ChunkSizeBytes controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request
// Used as storage.Writer.ChunkSize of https://pkg.go.dev/google.golang.org/cloud/storage#Writer
- ChunkSizeBytes int `yaml:"chunk_size_bytes"`
+ ChunkSizeBytes int `yaml:"chunk_size_bytes"`
+ noAuth bool `yaml:"no_auth"`
}
// Bucket implements the store.Bucket and shipper.Bucket interfaces against GCS.
@@ -78,20 +77,22 @@ func parseConfig(conf []byte) (Config, error) {
}
// NewBucket returns a new Bucket against the given bucket handle.
-func NewBucket(ctx context.Context, logger log.Logger, conf []byte, component string) (*Bucket, error) {
+func NewBucket(ctx context.Context, logger log.Logger, conf []byte, component string, rt http.RoundTripper) (*Bucket, error) {
config, err := parseConfig(conf)
if err != nil {
return nil, err
}
- return NewBucketWithConfig(ctx, logger, config, component)
+ return NewBucketWithConfig(ctx, logger, config, component, rt)
}
// NewBucketWithConfig returns a new Bucket with gcs Config struct.
-func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, component string) (*Bucket, error) {
+func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, component string, rt http.RoundTripper) (*Bucket, error) {
if gc.Bucket == "" {
return nil, errors.New("missing Google Cloud Storage bucket name for stored blocks")
}
-
+ if rt != nil {
+ gc.HTTPConfig.Transport = rt
+ }
var opts []option.ClientOption
// If ServiceAccount is provided, use them in GCS client, otherwise fallback to Google default logic.
@@ -102,7 +103,9 @@ func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, comp
}
opts = append(opts, option.WithCredentials(credentials))
}
-
+ if gc.noAuth {
+ opts = append(opts, option.WithoutAuthentication())
+ }
opts = append(opts,
option.WithUserAgent(fmt.Sprintf("thanos-%s/%s (%s)", component, version.Version, runtime.Version())),
)
@@ -122,14 +125,12 @@ func appendHttpOptions(gc Config, opts []option.ClientOption) ([]option.ClientOp
// Check if a roundtripper has been set in the config
// otherwise build the default transport.
var rt http.RoundTripper
+ rt, err := exthttp.DefaultTransport(gc.HTTPConfig)
+ if err != nil {
+ return nil, err
+ }
if gc.HTTPConfig.Transport != nil {
rt = gc.HTTPConfig.Transport
- } else {
- var err error
- rt, err = exthttp.DefaultTransport(gc.HTTPConfig)
- if err != nil {
- return nil, err
- }
}
// GCS uses some defaults when "options.WithHTTPClient" is not used that are important when we call
@@ -155,7 +156,6 @@ func newBucket(ctx context.Context, logger log.Logger, gc Config, opts []option.
)
if gc.UseGRPC {
opts = append(opts,
- option.WithGRPCDialOption(experimental.WithRecvBufferPool(grpc.NewSharedBufferPool())),
option.WithGRPCConnectionPool(gc.GRPCConnPoolSize),
)
gcsClient, err = storage.NewGRPCClient(ctx, opts...)
@@ -226,12 +226,33 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
- return b.bkt.Object(name).NewReader(ctx)
+ r, err := b.bkt.Object(name).NewReader(ctx)
+ if err != nil {
+ return r, err
+ }
+
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: r,
+ Size: func() (int64, error) {
+ return r.Attrs.Size, nil
+ },
+ }, nil
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
- return b.bkt.Object(name).NewRangeReader(ctx, off, length)
+ r, err := b.bkt.Object(name).NewRangeReader(ctx, off, length)
+ if err != nil {
+ return r, err
+ }
+
+ sz := r.Remain()
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: r,
+ Size: func() (int64, error) {
+ return sz, nil
+ },
+ }, nil
}
// Attributes returns information about the specified object.
@@ -315,7 +336,7 @@ func NewTestBucket(t testing.TB, project string) (objstore.Bucket, func(), error
return nil, nil, err
}
- b, err := NewBucket(ctx, log.NewNopLogger(), bc, "thanos-e2e-test")
+ b, err := NewBucket(ctx, log.NewNopLogger(), bc, "thanos-e2e-test", nil)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
index dad89e6698e29..eac8191a04628 100644
--- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
+++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
@@ -176,13 +176,13 @@ func parseConfig(conf []byte) (Config, error) {
}
// NewBucket returns a new Bucket using the provided s3 config values.
-func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
+func NewBucket(logger log.Logger, conf []byte, component string, rt http.RoundTripper) (*Bucket, error) {
config, err := parseConfig(conf)
if err != nil {
return nil, err
}
- return NewBucketWithConfig(logger, config, component)
+ return NewBucketWithConfig(logger, config, component, rt)
}
type overrideSignerType struct {
@@ -202,7 +202,7 @@ func (s *overrideSignerType) Retrieve() (credentials.Value, error) {
}
// NewBucketWithConfig returns a new Bucket using the provided s3 config values.
-func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
+func NewBucketWithConfig(logger log.Logger, config Config, component string, rt http.RoundTripper) (*Bucket, error) {
var chain []credentials.Provider
// TODO(bwplotka): Don't do flags as they won't scale, use actual params like v2, v4 instead
@@ -242,25 +242,25 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B
}),
}
}
-
+ if rt != nil {
+ config.HTTPConfig.Transport = rt
+ }
// Check if a roundtripper has been set in the config
// otherwise build the default transport.
- var rt http.RoundTripper
+ var tpt http.RoundTripper
+ tpt, err := exthttp.DefaultTransport(config.HTTPConfig)
+ if err != nil {
+ return nil, err
+ }
if config.HTTPConfig.Transport != nil {
- rt = config.HTTPConfig.Transport
- } else {
- var err error
- rt, err = exthttp.DefaultTransport(config.HTTPConfig)
- if err != nil {
- return nil, err
- }
+ tpt = config.HTTPConfig.Transport
}
client, err := minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewChainCredentials(chain),
Secure: !config.Insecure,
Region: config.Region,
- Transport: rt,
+ Transport: tpt,
BucketLookup: config.BucketLookupType.MinioType(),
})
if err != nil {
@@ -452,7 +452,17 @@ func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (
return nil, err
}
- return r, nil
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: r,
+ Size: func() (int64, error) {
+ stat, err := r.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ return stat.Size, nil
+ },
+ }, nil
}
// Get returns a reader for the given object name.
@@ -611,7 +621,7 @@ func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucke
if err != nil {
return nil, nil, err
}
- b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
+ b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test", nil)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go
index a8c56c55884d2..44fa6ed0542ae 100644
--- a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go
+++ b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go
@@ -154,12 +154,12 @@ type Container struct {
segmentsContainer string
}
-func NewContainer(logger log.Logger, conf []byte) (*Container, error) {
+func NewContainer(logger log.Logger, conf []byte, rt http.RoundTripper) (*Container, error) {
sc, err := parseConfig(conf)
if err != nil {
return nil, errors.Wrap(err, "parse config")
}
- return NewContainerFromConfig(logger, sc, false)
+ return NewContainerFromConfig(logger, sc, false, rt)
}
func ensureContainer(connection *swift.Connection, name string, createIfNotExist bool) error {
@@ -178,22 +178,22 @@ func ensureContainer(connection *swift.Connection, name string, createIfNotExist
return nil
}
-func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool) (*Container, error) {
-
+func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool, rt http.RoundTripper) (*Container, error) {
+ if rt != nil {
+ sc.HTTPConfig.Transport = rt
+ }
// Check if a roundtripper has been set in the config
// otherwise build the default transport.
- var rt http.RoundTripper
+ var tpt http.RoundTripper
+ tpt, err := exthttp.DefaultTransport(sc.HTTPConfig)
+ if err != nil {
+ return nil, err
+ }
if sc.HTTPConfig.Transport != nil {
- rt = sc.HTTPConfig.Transport
- } else {
- var err error
- rt, err = exthttp.DefaultTransport(sc.HTTPConfig)
- if err != nil {
- return nil, err
- }
+ tpt = sc.HTTPConfig.Transport
}
- connection := connectionFromConfig(sc, rt)
+ connection := connectionFromConfig(sc, tpt)
if err := connection.Authenticate(); err != nil {
return nil, errors.Wrap(err, "authentication")
}
@@ -262,7 +262,11 @@ func (c *Container) get(name string, headers swift.Headers, checkHash bool) (io.
if err != nil {
return nil, errors.Wrap(err, "open object")
}
- return file, err
+
+ return objstore.ObjectSizerReadCloser{
+ ReadCloser: file,
+ Size: file.Length,
+ }, nil
}
// Get returns a reader for the given object name.
@@ -378,7 +382,7 @@ func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) {
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod container for test) as well as swift not being fully strong consistent.")
}
- c, err := NewContainerFromConfig(log.NewNopLogger(), config, false)
+ c, err := NewContainerFromConfig(log.NewNopLogger(), config, false, nil)
if err != nil {
return nil, nil, errors.Wrap(err, "initializing new container")
}
@@ -392,7 +396,7 @@ func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) {
}
config.ContainerName = objstore.CreateTemporaryTestBucketName(t)
config.SegmentContainerName = config.ContainerName
- c, err := NewContainerFromConfig(log.NewNopLogger(), config, true)
+ c, err := NewContainerFromConfig(log.NewNopLogger(), config, true, nil)
if err != nil {
return nil, nil, errors.Wrap(err, "initializing new container")
}
diff --git a/vendor/github.com/thanos-io/objstore/testing.go b/vendor/github.com/thanos-io/objstore/testing.go
index b8e3744cb89c2..28cbd65889494 100644
--- a/vendor/github.com/thanos-io/objstore/testing.go
+++ b/vendor/github.com/thanos-io/objstore/testing.go
@@ -106,6 +106,11 @@ func AcceptanceTest(t *testing.T, bkt Bucket) {
rc1, err := bkt.Get(ctx, "id1/obj_1.some")
testutil.Ok(t, err)
defer func() { testutil.Ok(t, rc1.Close()) }()
+
+ sz, err := TryToGetSize(rc1)
+ testutil.Ok(t, err)
+ testutil.Equals(t, int64(11), sz, "expected size to be equal to 11")
+
content, err := io.ReadAll(rc1)
testutil.Ok(t, err)
testutil.Equals(t, "@test-data@", string(content))
@@ -118,6 +123,11 @@ func AcceptanceTest(t *testing.T, bkt Bucket) {
rc2, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, 3)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, rc2.Close()) }()
+
+ sz, err = TryToGetSize(rc2)
+ testutil.Ok(t, err)
+ testutil.Equals(t, int64(3), sz, "expected size to be equal to 3")
+
content, err = io.ReadAll(rc2)
testutil.Ok(t, err)
testutil.Equals(t, "tes", string(content))
@@ -126,6 +136,11 @@ func AcceptanceTest(t *testing.T, bkt Bucket) {
rcUnspecifiedLen, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, -1)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, rcUnspecifiedLen.Close()) }()
+
+ sz, err = TryToGetSize(rcUnspecifiedLen)
+ testutil.Ok(t, err)
+ testutil.Equals(t, int64(10), sz, "expected size to be equal to 10")
+
content, err = io.ReadAll(rcUnspecifiedLen)
testutil.Ok(t, err)
testutil.Equals(t, "test-data@", string(content))
@@ -141,6 +156,11 @@ func AcceptanceTest(t *testing.T, bkt Bucket) {
rcLength, err := bkt.GetRange(ctx, "id1/obj_1.some", 3, 9999)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, rcLength.Close()) }()
+
+ sz, err = TryToGetSize(rcLength)
+ testutil.Ok(t, err)
+ testutil.Equals(t, int64(8), sz, "expected size to be equal to 8")
+
content, err = io.ReadAll(rcLength)
testutil.Ok(t, err)
testutil.Equals(t, "st-data@", string(content))
diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/data.go b/vendor/github.com/twmb/franz-go/pkg/kfake/data.go
index 9f5d46c6b8687..566d51b69560b 100644
--- a/vendor/github.com/twmb/franz-go/pkg/kfake/data.go
+++ b/vendor/github.com/twmb/franz-go/pkg/kfake/data.go
@@ -92,7 +92,9 @@ func (d *data) mkt(t string, nparts int, nreplicas int, configs map[string]*stri
d.id2t[id] = t
d.t2id[t] = id
d.treplicas[t] = nreplicas
- d.tcfgs[t] = configs
+ if configs != nil {
+ d.tcfgs[t] = configs
+ }
for i := 0; i < nparts; i++ {
d.tps.mkp(t, int32(i), d.c.newPartData)
}
diff --git a/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go b/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go
new file mode 100644
index 0000000000000..97a9369d13723
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go
@@ -0,0 +1,60 @@
+// Package plain provides PLAIN sasl authentication as specified in RFC4616.
+package plain
+
+import (
+ "context"
+ "errors"
+
+ "github.com/twmb/franz-go/pkg/sasl"
+)
+
+// Auth contains information for authentication.
+type Auth struct {
+ // Zid is an optional authorization ID to use in authenticating.
+ Zid string
+
+ // User is username to use for authentication.
+ User string
+
+ // Pass is the password to use for authentication.
+ Pass string
+
+ _ struct{} // require explicit field initialization
+}
+
+// AsMechanism returns a sasl mechanism that will use 'a' as credentials for
+// all sasl sessions.
+//
+// This is a shortcut for using the Plain function and is useful when you do
+// not need to live-rotate credentials.
+func (a Auth) AsMechanism() sasl.Mechanism {
+ return Plain(func(context.Context) (Auth, error) {
+ return a, nil
+ })
+}
+
+// Plain returns a sasl mechanism that will call authFn whenever sasl
+// authentication is needed. The returned Auth is used for a single session.
+func Plain(authFn func(context.Context) (Auth, error)) sasl.Mechanism {
+ return plain(authFn)
+}
+
+type plain func(context.Context) (Auth, error)
+
+func (plain) Name() string { return "PLAIN" }
+func (fn plain) Authenticate(ctx context.Context, _ string) (sasl.Session, []byte, error) {
+ auth, err := fn(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ if auth.User == "" || auth.Pass == "" {
+ return nil, nil, errors.New("PLAIN user and pass must be non-empty")
+ }
+ return session{}, []byte(auth.Zid + "\x00" + auth.User + "\x00" + auth.Pass), nil
+}
+
+type session struct{}
+
+func (session) Challenge([]byte) (bool, []byte, error) {
+ return true, nil, nil
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md
new file mode 100644
index 0000000000000..ec35080b4ec3a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md
@@ -0,0 +1,76 @@
+# GCP Resource detector
+
+The GCP resource detector supports detecting resources on:
+
+ * Google Compute Engine (GCE)
+ * Google Kubernetes Engine (GKE)
+ * Google App Engine (GAE)
+ * Cloud Run
+ * Cloud Run jobs
+ * Cloud Functions
+
+## Usage
+
+```golang
+ctx := context.Background()
+// Detect your resources
+res, err := resource.New(ctx,
+ // Use the GCP resource detector!
+ resource.WithDetectors(gcp.NewDetector()),
+ // Keep the default detectors
+ resource.WithTelemetrySDK(),
+ // Add your own custom attributes to identify your application
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String("my-application"),
+ semconv.ServiceNamespaceKey.String("my-company-frontend-team"),
+ ),
+)
+if err != nil {
+ // Handle err
+}
+// Use the resource in your tracerprovider (or meterprovider)
+tp := trace.NewTracerProvider(
+ // ... other options
+ trace.WithResource(res),
+)
+```
+
+## Setting Kubernetes attributes
+
+Previous iterations of GCP resource detection attempted to detect
+`container.name`, `k8s.pod.name` and `k8s.namespace.name`. When using this detector,
+you should use this in your Pod Spec to set these using
+[`OTEL_RESOURCE_ATTRIBUTES`](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable):
+
+```yaml
+env:
+- name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+- name: NAMESPACE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+- name: CONTAINER_NAME
+ value: my-container-name
+- name: OTEL_RESOURCE_ATTRIBUTES
+ value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE_NAME),k8s.container.name=$(CONTAINER_NAME)
+```
+To have a detector unpack the `OTEL_RESOURCE_ATTRIBUTES` envvar, use the `WithFromEnv` option:
+
+```golang
+...
+// Detect your resources
+res, err := resource.New(ctx,
+ resource.WithDetectors(gcp.NewDetector()),
+ resource.WithTelemetrySDK(),
+ resource.WithFromEnv(), // unpacks OTEL_RESOURCE_ATTRIBUTES
+ // Add your own custom attributes to identify your application
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String("my-application"),
+ semconv.ServiceNamespaceKey.String("my-company-frontend-team"),
+ ),
+)
+...
+```
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go
new file mode 100644
index 0000000000000..1c1490b02c573
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "os"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+ gcpFunctionNameKey = "K_SERVICE"
+)
+
+// NewCloudFunction will return a GCP Cloud Function resource detector.
+//
+// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes.
+func NewCloudFunction() resource.Detector {
+ return &cloudFunction{
+ cloudRun: NewCloudRun(),
+ }
+}
+
+// cloudFunction collects resource information of GCP Cloud Function.
+type cloudFunction struct {
+ cloudRun *CloudRun
+}
+
+// Detect detects associated resources when running in GCP Cloud Function.
+func (f *cloudFunction) Detect(ctx context.Context) (*resource.Resource, error) {
+ functionName, ok := f.googleCloudFunctionName()
+ if !ok {
+ return nil, nil
+ }
+
+ projectID, err := f.cloudRun.mc.ProjectID()
+ if err != nil {
+ return nil, err
+ }
+ region, err := f.cloudRun.cloudRegion()
+ if err != nil {
+ return nil, err
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ semconv.CloudPlatformGCPCloudFunctions,
+ semconv.FaaSName(functionName),
+ semconv.CloudAccountID(projectID),
+ semconv.CloudRegion(region),
+ }
+ return resource.NewWithAttributes(semconv.SchemaURL, attributes...), nil
+}
+
+func (f *cloudFunction) googleCloudFunctionName() (string, bool) {
+ return os.LookupEnv(gcpFunctionNameKey)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go
new file mode 100644
index 0000000000000..7754b466838b8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const serviceNamespace = "cloud-run-managed"
+
+// The minimal list of metadata.Client methods we use. Use an interface so we
+// can replace it with a fake implementation in the unit test.
+type metadataClient interface {
+ ProjectID() (string, error)
+ Get(string) (string, error)
+ InstanceID() (string, error)
+}
+
+// CloudRun collects resource information of Cloud Run instance.
+//
+// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes.
+type CloudRun struct {
+ mc metadataClient
+ onGCE func() bool
+ getenv func(string) string
+}
+
+// compile time assertion that CloudRun implements the resource.Detector
+// interface.
+var _ resource.Detector = (*CloudRun)(nil)
+
+// NewCloudRun creates a CloudRun detector.
+//
+// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes.
+func NewCloudRun() *CloudRun {
+ return &CloudRun{
+ mc: metadata.NewClient(nil),
+ onGCE: metadata.OnGCE,
+ getenv: os.Getenv,
+ }
+}
+
+func (c *CloudRun) cloudRegion() (string, error) {
+ region, err := c.mc.Get("instance/region")
+ if err != nil {
+ return "", err
+ }
+ // Region from the metadata server is in the format /projects/123/regions/r.
+ // https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
+
+// Detect detects associated resources when running on Cloud Run hosts.
+// NOTE: the service.namespace attribute is currently hardcoded to be
+// "cloud-run-managed". This may change in the future, please do not rely on
+// this behavior yet.
+func (c *CloudRun) Detect(ctx context.Context) (*resource.Resource, error) {
+ // .OnGCE is actually testing whether the metadata server is available.
+ // Metadata server is supported on Cloud Run.
+ if !c.onGCE() {
+ return nil, nil
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ semconv.ServiceNamespace(serviceNamespace),
+ }
+
+ var errInfo []string
+
+ if projectID, err := c.mc.ProjectID(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if projectID != "" {
+ attributes = append(attributes, semconv.CloudAccountID(projectID))
+ }
+
+ if region, err := c.cloudRegion(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if region != "" {
+ attributes = append(attributes, semconv.CloudRegion(region))
+ }
+
+ if instanceID, err := c.mc.InstanceID(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if instanceID != "" {
+ attributes = append(attributes, semconv.ServiceInstanceID(instanceID))
+ }
+
+ // Part of Cloud Run container runtime contract.
+ // See https://cloud.google.com/run/docs/reference/container-contract
+ if service := c.getenv("K_SERVICE"); service == "" {
+ errInfo = append(errInfo, "envvar K_SERVICE contains empty string.")
+ } else {
+ attributes = append(attributes, semconv.ServiceName(service))
+ }
+ res := resource.NewWithAttributes(semconv.SchemaURL, attributes...)
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting Cloud Run resources: %s", errInfo)
+ }
+
+ return res, aggregatedErr
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go
new file mode 100644
index 0000000000000..b9eb1e1e14958
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "cloud.google.com/go/compute/metadata"
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// NewDetector returns a resource detector which detects resource attributes on:
+// * Google Compute Engine (GCE).
+// * Google Kubernetes Engine (GKE).
+// * Google App Engine (GAE).
+// * Cloud Run.
+// * Cloud Functions.
+func NewDetector() resource.Detector {
+ return &detector{detector: gcp.NewDetector()}
+}
+
+type detector struct {
+ detector gcpDetector
+}
+
+// Detect detects associated resources when running on GCE, GKE, GAE,
+// Cloud Run, and Cloud functions.
+func (d *detector) Detect(ctx context.Context) (*resource.Resource, error) {
+ if !metadata.OnGCE() {
+ return nil, nil
+ }
+ b := &resourceBuilder{}
+ b.attrs = append(b.attrs, semconv.CloudProviderGCP)
+ b.add(semconv.CloudAccountIDKey, d.detector.ProjectID)
+
+ switch d.detector.CloudPlatform() {
+ case gcp.GKE:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPKubernetesEngine)
+ b.addZoneOrRegion(d.detector.GKEAvailabilityZoneOrRegion)
+ b.add(semconv.K8SClusterNameKey, d.detector.GKEClusterName)
+ b.add(semconv.HostIDKey, d.detector.GKEHostID)
+ case gcp.CloudRun:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.CloudRunJob:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.GCPCloudRunJobExecutionKey, d.detector.CloudRunJobExecution)
+ b.addInt(semconv.GCPCloudRunJobTaskIndexKey, d.detector.CloudRunJobTaskIndex)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.CloudFunctions:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudFunctions)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.AppEngineFlex:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine)
+ b.addZoneAndRegion(d.detector.AppEngineFlexAvailabilityZoneAndRegion)
+ b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName)
+ b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance)
+ case gcp.AppEngineStandard:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine)
+ b.add(semconv.CloudAvailabilityZoneKey, d.detector.AppEngineStandardAvailabilityZone)
+ b.add(semconv.CloudRegionKey, d.detector.AppEngineStandardCloudRegion)
+ b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName)
+ b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance)
+ case gcp.GCE:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPComputeEngine)
+ b.addZoneAndRegion(d.detector.GCEAvailabilityZoneAndRegion)
+ b.add(semconv.HostTypeKey, d.detector.GCEHostType)
+ b.add(semconv.HostIDKey, d.detector.GCEHostID)
+ b.add(semconv.HostNameKey, d.detector.GCEHostName)
+ b.add(semconv.GCPGceInstanceNameKey, d.detector.GCEInstanceName)
+ b.add(semconv.GCPGceInstanceHostnameKey, d.detector.GCEInstanceHostname)
+ default:
+ // We don't support this platform yet, so just return with what we have
+ }
+ return b.build()
+}
+
+// resourceBuilder simplifies constructing resources using GCP detection
+// library functions.
+type resourceBuilder struct {
+ errs []error
+ attrs []attribute.KeyValue
+}
+
+func (r *resourceBuilder) add(key attribute.Key, detect func() (string, error)) {
+ if v, err := detect(); err == nil {
+ r.attrs = append(r.attrs, key.String(v))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) addInt(key attribute.Key, detect func() (string, error)) {
+ if v, err := detect(); err == nil {
+ if vi, err := strconv.Atoi(v); err == nil {
+ r.attrs = append(r.attrs, key.Int(vi))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+// zoneAndRegion functions are expected to return zone, region, err.
+func (r *resourceBuilder) addZoneAndRegion(detect func() (string, string, error)) {
+ if zone, region, err := detect(); err == nil {
+ r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(zone))
+ r.attrs = append(r.attrs, semconv.CloudRegion(region))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) addZoneOrRegion(detect func() (string, gcp.LocationType, error)) {
+ if v, locType, err := detect(); err == nil {
+ switch locType {
+ case gcp.Zone:
+ r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(v))
+ case gcp.Region:
+ r.attrs = append(r.attrs, semconv.CloudRegion(v))
+ default:
+ r.errs = append(r.errs, fmt.Errorf("location must be zone or region. Got %v", locType))
+ }
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) build() (*resource.Resource, error) {
+ var err error
+ if len(r.errs) > 0 {
+ err = fmt.Errorf("%w: %s", resource.ErrPartialResource, r.errs)
+ }
+ return resource.NewWithAttributes(semconv.SchemaURL, r.attrs...), err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go
new file mode 100644
index 0000000000000..2a29c420b498e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// GCE collects resource information of GCE computing instances.
+//
+// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes on GCE.
+type GCE struct{}
+
+// compile time assertion that GCE implements the resource.Detector interface.
+var _ resource.Detector = (*GCE)(nil)
+
+// Detect detects associated resources when running on GCE hosts.
+func (gce *GCE) Detect(ctx context.Context) (*resource.Resource, error) {
+ if !metadata.OnGCE() {
+ return nil, nil
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ }
+
+ var errInfo []string
+
+ if projectID, err := metadata.ProjectIDWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if projectID != "" {
+ attributes = append(attributes, semconv.CloudAccountID(projectID))
+ }
+
+ if zone, err := metadata.ZoneWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if zone != "" {
+ attributes = append(attributes, semconv.CloudAvailabilityZone(zone))
+
+ splitArr := strings.SplitN(zone, "-", 3)
+ if len(splitArr) == 3 {
+ attributes = append(attributes, semconv.CloudRegion(strings.Join(splitArr[0:2], "-")))
+ }
+ }
+
+ if instanceID, err := metadata.InstanceIDWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if instanceID != "" {
+ attributes = append(attributes, semconv.HostID(instanceID))
+ }
+
+ if name, err := metadata.InstanceNameWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if name != "" {
+ attributes = append(attributes, semconv.HostName(name))
+ }
+
+ if hostname, err := os.Hostname(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if hostname != "" {
+ attributes = append(attributes, semconv.HostName(hostname))
+ }
+
+ if hostType, err := metadata.GetWithContext(ctx, "instance/machine-type"); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if hostType != "" {
+ attributes = append(attributes, semconv.HostType(hostType))
+ }
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting GCE resources: %s", errInfo)
+ }
+
+ return resource.NewWithAttributes(semconv.SchemaURL, attributes...), aggregatedErr
+}
+
+// hasProblem checks if the err is not nil or for missing resources.
+func hasProblem(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ var nde metadata.NotDefinedError
+ if undefined := errors.As(err, &nde); undefined {
+ return false
+ }
+ return true
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go
new file mode 100644
index 0000000000000..0588ad6a48526
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// GKE collects resource information of GKE computing instances.
+//
+// Deprecated: Use gcp.NewDetector() instead, which does NOT detect container, pod, and namespace attributes.
+// Set those using the OTEL_RESOURCE_ATTRIBUTES env var instead.
+type GKE struct{}
+
+// compile time assertion that GKE implements the resource.Detector interface.
+var _ resource.Detector = (*GKE)(nil)
+
+// Detect detects associated resources when running in GKE environment.
+func (gke *GKE) Detect(ctx context.Context) (*resource.Resource, error) {
+ gcpDetecor := GCE{}
+ gceLablRes, err := gcpDetecor.Detect(ctx)
+
+ if os.Getenv("KUBERNETES_SERVICE_HOST") == "" {
+ return gceLablRes, err
+ }
+
+ var errInfo []string
+ if err != nil {
+ errInfo = append(errInfo, err.Error())
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.K8SNamespaceName(os.Getenv("NAMESPACE")),
+ semconv.K8SPodName(os.Getenv("HOSTNAME")),
+ }
+
+ if containerName := os.Getenv("CONTAINER_NAME"); containerName != "" {
+ attributes = append(attributes, semconv.ContainerName(containerName))
+ }
+
+ if clusterName, err := metadata.InstanceAttributeValueWithContext(ctx, "cluster-name"); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if clusterName != "" {
+ attributes = append(attributes, semconv.K8SClusterName(clusterName))
+ }
+
+ k8sattributeRes := resource.NewWithAttributes(semconv.SchemaURL, attributes...)
+
+ res, err := resource.Merge(gceLablRes, k8sattributeRes)
+ if err != nil {
+ errInfo = append(errInfo, err.Error())
+ }
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting GKE resources: %s", errInfo)
+ }
+
+ return res, aggregatedErr
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go
new file mode 100644
index 0000000000000..666d82e616c38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp"
+
+// gcpDetector can detect attributes of GCP environments.
+type gcpDetector interface {
+ ProjectID() (string, error)
+ CloudPlatform() gcp.Platform
+ GKEAvailabilityZoneOrRegion() (string, gcp.LocationType, error)
+ GKEClusterName() (string, error)
+ GKEHostID() (string, error)
+ FaaSName() (string, error)
+ FaaSVersion() (string, error)
+ FaaSID() (string, error)
+ FaaSCloudRegion() (string, error)
+ AppEngineFlexAvailabilityZoneAndRegion() (string, string, error)
+ AppEngineStandardAvailabilityZone() (string, error)
+ AppEngineStandardCloudRegion() (string, error)
+ AppEngineServiceName() (string, error)
+ AppEngineServiceVersion() (string, error)
+ AppEngineServiceInstance() (string, error)
+ GCEAvailabilityZoneAndRegion() (string, string, error)
+ GCEHostType() (string, error)
+ GCEHostID() (string, error)
+ GCEHostName() (string, error)
+ GCEInstanceHostname() (string, error)
+ GCEInstanceName() (string, error)
+ CloudRunJobExecution() (string, error)
+ CloudRunJobTaskIndex() (string, error)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go
new file mode 100644
index 0000000000000..1acc898319839
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+// Version is the current release version of the GCP resource detector.
+func Version() string {
+ return "1.29.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index ab091cf6ade33..18436eaedffd8 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -42,6 +42,8 @@ type config struct {
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
SpanStartOptions []trace.SpanStartOption
+ SpanAttributes []attribute.KeyValue
+ MetricAttributes []attribute.KeyValue
ReceivedEvent bool
SentEvent bool
@@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) {
func WithSpanOptions(opts ...trace.SpanStartOption) Option {
return spanStartOption{opts}
}
+
+type spanAttributesOption struct{ a []attribute.KeyValue }
+
+func (o spanAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.SpanAttributes = o.a
+ }
+}
+
+// WithSpanAttributes returns an Option to add custom attributes to the spans.
+func WithSpanAttributes(a ...attribute.KeyValue) Option {
+ return spanAttributesOption{a: a}
+}
+
+type metricAttributesOption struct{ a []attribute.KeyValue }
+
+func (o metricAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.MetricAttributes = o.a
+ }
+}
+
+// WithMetricAttributes returns an Option to add custom attributes to the metrics.
+func WithMetricAttributes(a ...attribute.KeyValue) Option {
+ return metricAttributesOption{a: a}
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
index 201867a86944a..fbcbfb84e047e 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attrs...),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
)
gctx := gRPCContext{
- metricAttrs: attrs,
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
record: true,
}
if h.config.Filter != nil {
@@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
ctx,
name,
trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attrs...),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
)
gctx := gRPCContext{
- metricAttrs: attrs,
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
record: true,
}
if h.config.Filter != nil {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
index a15d06cb0c451..04f425edfefea 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// Version is the current release version of the gRPC instrumentation.
func Version() string {
- return "0.53.0"
+ return "0.54.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
index 214acaf581ef1..5d6e6156b7beb 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -18,13 +18,6 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
- serverResponseSize = "http.server.response.size" // Incoming response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Outgoing request bytes total
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index f0a9bb9efeb5c..a01bfafbe0773 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -8,6 +8,8 @@ import (
"net/http"
"net/http/httptrace"
+ "go.opentelemetry.io/otel/attribute"
+
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
@@ -33,8 +35,9 @@ type config struct {
SpanNameFormatter func(string, *http.Request) string
ClientTrace func(context.Context) *httptrace.ClientTrace
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ MetricAttributesFn func(*http.Request) []attribute.KeyValue
}
// Option interface used for setting optional config properties.
@@ -194,3 +197,11 @@ func WithServerName(server string) Option {
c.ServerName = server
})
}
+
+// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
+// These attributes will be included in metrics for every request.
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
+ return optionFunc(func(c *config) {
+ c.MetricAttributesFn = metricAttributesFn
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index d01bdccf40dc5..33580a35b774b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -9,11 +9,9 @@ import (
"github.com/felixge/httpsnoop"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@@ -24,7 +22,6 @@ type middleware struct {
server string
tracer trace.Tracer
- meter metric.Meter
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
readEvent bool
@@ -34,10 +31,7 @@ type middleware struct {
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
- traceSemconv semconv.HTTPServer
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
+ semconv semconv.HTTPServer
}
func defaultHandlerFormatter(operation string, _ *http.Request) string {
@@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
h := middleware{
operation: operation,
-
- traceSemconv: semconv.NewHTTPServer(),
}
defaultOpts := []Option{
@@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
c := newConfig(append(defaultOpts, opts...)...)
h.configure(c)
- h.createMeasures()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
func (h *middleware) configure(c *config) {
h.tracer = c.Tracer
- h.meter = c.Meter
h.propagators = c.Propagators
h.spanStartOptions = c.SpanStartOptions
h.readEvent = c.ReadEvent
@@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) {
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
h.server = c.ServerName
+ h.semconv = semconv.NewHTTPServer(c.Meter)
}
func handleErr(err error) {
@@ -96,30 +87,6 @@ func handleErr(err error) {
}
}
-func (h *middleware) createMeasures() {
- var err error
- h.requestBytesCounter, err = h.meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- h.responseBytesCounter, err = h.meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- h.serverLatencyMeasure, err = h.meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
-}
-
// serveHTTP sets up tracing and calls the given next http.Handler with the span
// context injected into the request context.
func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
@@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
opts := []trace.SpanStartOption{
- trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
+ trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
}
opts = append(opts, h.spanStartOptions...)
@@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
- var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc)
if r.Body != nil && r.Body != http.NoBody {
- bw.ReadCloser = r.Body
- bw.record = readRecordFunc
- r.Body = &bw
+ r.Body = bw
}
writeRecordFunc := func(int64) {}
@@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
- rww := &respWriterWrapper{
- ResponseWriter: w,
- record: writeRecordFunc,
- ctx: ctx,
- props: h.propagators,
- statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
- }
+ rww := request.NewRespWriterWrapper(w, writeRecordFunc)
// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
next.ServeHTTP(w, r.WithContext(ctx))
- span.SetStatus(semconv.ServerStatus(rww.statusCode))
- span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
- StatusCode: rww.statusCode,
- ReadBytes: bw.read.Load(),
- ReadError: bw.err,
- WriteBytes: rww.written,
- WriteError: rww.err,
+ statusCode := rww.StatusCode()
+ bytesWritten := rww.BytesWritten()
+ span.SetStatus(h.semconv.Status(statusCode))
+ span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+ StatusCode: statusCode,
+ ReadBytes: bw.BytesRead(),
+ ReadError: bw.Error(),
+ WriteBytes: bytesWritten,
+ WriteError: rww.Error(),
})...)
- // Add metrics
- attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
- if rww.statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
- }
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := []metric.AddOption{o} // Allocate vararg slice once.
- h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
- h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
-
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
- h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+ h.semconv.RecordMetrics(ctx, semconv.MetricData{
+ ServerName: h.server,
+ Req: r,
+ StatusCode: statusCode,
+ AdditionalAttributes: labeler.Get(),
+ RequestSize: bw.BytesRead(),
+ ResponseSize: bytesWritten,
+ ElapsedTime: elapsedTime,
+ })
}
// WithRouteTag annotates spans and metrics with the provided route name
// with HTTP route attribute.
func WithRouteTag(route string, h http.Handler) http.Handler {
- attr := semconv.NewHTTPServer().Route(route)
+ attr := semconv.NewHTTPServer(nil).Route(route)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span := trace.SpanFromContext(r.Context())
span.SetAttributes(attr)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
new file mode 100644
index 0000000000000..a945f55661658
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "io"
+ "sync"
+)
+
+var _ io.ReadCloser = &BodyWrapper{}
+
+// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type BodyWrapper struct {
+ io.ReadCloser
+ OnRead func(n int64) // must not be nil
+
+ mu sync.Mutex
+ read int64
+ err error
+}
+
+// NewBodyWrapper creates a new BodyWrapper.
+//
+// The onRead attribute is a callback that will be called every time the data
+// is read, with the number of bytes being read.
+func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
+ return &BodyWrapper{
+ ReadCloser: body,
+ OnRead: onRead,
+ }
+}
+
+// Read reads the data from the io.ReadCloser, and stores the number of bytes
+// read and the error.
+func (w *BodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+
+ w.updateReadData(n1, err)
+ w.OnRead(n1)
+ return n, err
+}
+
+func (w *BodyWrapper) updateReadData(n int64, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.read += n
+ if err != nil {
+ w.err = err
+ }
+}
+
+// Close closes the io.ReadCloser.
+func (w *BodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+// BytesRead returns the number of bytes read up to this point.
+func (w *BodyWrapper) BytesRead() int64 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.read
+}
+
+// Error returns the last error.
+func (w *BodyWrapper) Error() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
new file mode 100644
index 0000000000000..aea171fb260b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "net/http"
+ "sync"
+)
+
+var _ http.ResponseWriter = &RespWriterWrapper{}
+
+// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
+// that may be useful when using it in real life situations.
+type RespWriterWrapper struct {
+ http.ResponseWriter
+ OnWrite func(n int64) // must not be nil
+
+ mu sync.RWMutex
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+// NewRespWriterWrapper creates a new RespWriterWrapper.
+//
+// The onWrite attribute is a callback that will be called every time the data
+// is written, with the number of bytes that were written.
+func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
+ return &RespWriterWrapper{
+ ResponseWriter: w,
+ OnWrite: onWrite,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+}
+
+// Write writes the bytes array into the [ResponseWriter], and tracks the
+// number of bytes written and last error.
+func (w *RespWriterWrapper) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.writeHeader(http.StatusOK)
+
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.OnWrite(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Blocking consecutive calls to WriteHeader alters expected behavior and will
+// remove warning logs from net/http where developers will notice incorrect handler implementations.
+func (w *RespWriterWrapper) WriteHeader(statusCode int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.writeHeader(statusCode)
+}
+
+// writeHeader persists the status code for span attribution, and propagates
+// the call to the underlying ResponseWriter.
+// It does not acquire a lock, and therefore assumes that is being handled by a
+// parent method.
+func (w *RespWriterWrapper) writeHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
+
+// Flush implements [http.Flusher].
+func (w *RespWriterWrapper) Flush() {
+ w.WriteHeader(http.StatusOK)
+
+ if f, ok := w.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// BytesWritten returns the number of bytes written.
+func (w *RespWriterWrapper) BytesWritten() int64 {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.written
+}
+
+// StatusCode returns the HTTP status code that was sent.
+func (w *RespWriterWrapper) StatusCode() int {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.statusCode
+}
+
+// Error returns the last error.
+func (w *RespWriterWrapper) Error() error {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.err
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 3ec0ad00c81f8..9cae4cab86af1 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -4,6 +4,7 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
import (
+ "context"
"fmt"
"net/http"
"os"
@@ -11,6 +12,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
)
type ResponseTelemetry struct {
@@ -23,6 +25,11 @@ type ResponseTelemetry struct {
type HTTPServer struct {
duplicate bool
+
+ // Old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
return oldHTTPServer{}.Route(route)
}
-func NewHTTPServer() HTTPServer {
- env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
- return HTTPServer{duplicate: env == "http/dup"}
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
+// Status returns a span status code and message for an HTTP status code
// value returned by a server. Status codes in the 400-499 range are not
// returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
+func (s HTTPServer) Status(code int) (codes.Code, string) {
if code < 100 || code >= 600 {
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
}
@@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) {
}
return codes.Unset, ""
}
+
+type MetricData struct {
+ ServerName string
+ Req *http.Request
+ StatusCode int
+ AdditionalAttributes []attribute.KeyValue
+
+ RequestSize int64
+ ResponseSize int64
+ ElapsedTime float64
+}
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+ if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
+ // This will happen if an HTTPServer{} is used instead of NewHTTPServer.
+ return
+ }
+
+ attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+ s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
+ s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+
+ // TODO: Duplicate Metrics
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+ env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+ duplicate := env == "http/dup"
+ server := HTTPServer{
+ duplicate: duplicate,
+ }
+ server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
+ return server
+}
+
+type HTTPClient struct {
+ duplicate bool
+}
+
+func NewHTTPClient() HTTPClient {
+ env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+ return HTTPClient{duplicate: env == "http/dup"}
+}
+
+// RequestTraceAttrs returns attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ if c.duplicate {
+ return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
+ }
+ return oldHTTPClient{}.RequestTraceAttrs(req)
+}
+
+// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ if c.duplicate {
+ return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
+ }
+
+ return oldHTTPClient{}.ResponseTraceAttrs(resp)
+}
+
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+ if c.duplicate {
+ return newHTTPClient{}.ErrorType(err)
+ }
+
+ return attribute.KeyValue{}
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
similarity index 57%
rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
index 0c5d4c4608a28..745b8c67bc40c 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -4,11 +4,14 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
import (
+ "fmt"
"net/http"
+ "reflect"
+ "strconv"
"strings"
"go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type newHTTPServer struct{}
@@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
func (n newHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
+
+type newHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ /*
+ below attributes are returned:
+ - http.request.method
+ - http.request.method.original
+ - url.full
+ - server.address
+ - server.port
+ - network.protocol.name
+ - network.protocol.version
+ */
+ numOfAttributes := 3 // URL, server address, proto, and method.
+
+ var urlHost string
+ if req.URL != nil {
+ urlHost = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+ requestHost, requestPort = splitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if eligiblePort > 0 {
+ numOfAttributes++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ numOfAttributes++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ numOfAttributes++
+ }
+ if protoVersion != "" {
+ numOfAttributes++
+ }
+
+ method, originalMethod := n.method(req.Method)
+ if originalMethod != (attribute.KeyValue{}) {
+ numOfAttributes++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+ attrs = append(attrs, method)
+ if originalMethod != (attribute.KeyValue{}) {
+ attrs = append(attrs, originalMethod)
+ }
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, semconvNew.URLFull(u))
+
+ attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+ if eligiblePort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
+func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ /*
+ below attributes are returned:
+ - http.response.status_code
+ - error.type
+ */
+ var count int
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ errorType := strconv.Itoa(resp.StatusCode)
+ attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+ }
+ return attrs
+}
+
+func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return semconvNew.ErrorTypeOther
+ }
+
+ return semconvNew.ErrorTypeKey.String(value)
+}
+
+func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func isErrorStatusCode(code int) bool {
+ return code >= 400 || code < 100
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
index e7f293761bd02..e6e14924f5790 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -9,8 +9,9 @@ import (
"strconv"
"strings"
+ "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
// splitHostPort splits a network address hostport of the form "host",
@@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p)
+ return host, int(p) // nolint: gosec // Byte size checked 16 above.
}
func requiredHTTPPort(https bool, port int) int { // nolint:revive
@@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
http.MethodPut: semconvNew.HTTPRequestMethodPut,
http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
index c3e838aaa5422..c999b05e675b2 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -7,9 +7,13 @@ import (
"errors"
"io"
"net/http"
+ "slices"
+ "strings"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)
@@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
func HTTPStatusCode(status int) attribute.KeyValue {
return semconv.HTTPStatusCode(status)
}
+
+// Server HTTP metrics.
+const (
+ serverRequestSize = "http.server.request.size" // Incoming request bytes total
+ serverResponseSize = "http.server.response.size" // Incoming response bytes total
+ serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
+)
+
+func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ var err error
+ requestBytesCounter, err := meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ serverLatencyMeasure, err := meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
+}
+
+func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ n := len(additionalAttributes) + 3
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ if statusCode > 0 {
+ n++
+ }
+
+ attributes := slices.Grow(additionalAttributes, n)
+ attributes = append(attributes,
+ o.methodMetric(req.Method),
+ o.scheme(req.TLS != nil),
+ semconv.NetHostName(host))
+
+ if hostPort > 0 {
+ attributes = append(attributes, semconv.NetHostPort(hostPort))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconv.NetProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ return attributes
+}
+
+func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return semconv.HTTPMethod(method)
+}
+
+func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconv.HTTPSchemeHTTPS
+ }
+ return semconv.HTTPSchemeHTTP
+}
+
+type oldHTTPClient struct{}
+
+func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ return semconvutil.HTTPClientRequest(req)
+}
+
+func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ return semconvutil.HTTPClientResponse(resp)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
index a9a9226b39afa..b80a1db61fa0f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p)
+ return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
}
func netProtocol(proto string) (name string, version string) {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index 0d3cb2e4aa4fc..b4119d3438b7d 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -11,13 +11,15 @@ import (
"sync/atomic"
"time"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+
"go.opentelemetry.io/otel/trace"
)
@@ -26,14 +28,16 @@ import (
type Transport struct {
rt http.RoundTripper
- tracer trace.Tracer
- meter metric.Meter
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- clientTrace func(context.Context) *httptrace.ClientTrace
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+ metricAttributesFn func(*http.Request) []attribute.KeyValue
+ semconv semconv.HTTPClient
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
@@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
}
t := Transport{
- rt: base,
+ rt: base,
+ semconv: semconv.NewHTTPClient(),
}
defaultOpts := []Option{
@@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) {
t.filters = c.Filters
t.spanNameFormatter = c.SpanNameFormatter
t.clientTrace = c.ClientTrace
+ t.metricAttributesFn = c.MetricAttributesFn
}
func (t *Transport) createMeasures() {
@@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
- // use a body wrapper to determine the request size
- var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ bw := request.NewBodyWrapper(r.Body, func(int64) {})
if r.Body != nil && r.Body != http.NoBody {
- bw.ReadCloser = r.Body
- // noop to prevent nil panic. not using this record fun yet.
- bw.record = func(int64) {}
- r.Body = &bw
+ r.Body = bw
}
- span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r)
if err != nil {
- span.RecordError(err)
+ // set error type attribute if the error is part of the predefined
+ // error types.
+ // otherwise, record it as an exception
+ if errType := t.semconv.ErrorType(err); errType.Valid() {
+ span.SetAttributes(errType)
+ } else {
+ span.RecordError(err)
+ }
+
span.SetStatus(codes.Error, err.Error())
span.End()
return res, err
}
// metrics
- metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+ metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...)
if res.StatusCode > 0 {
metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
}
o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
- addOpts := []metric.AddOption{o} // Allocate vararg slice once.
- t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
+
+ t.requestBytesCounter.Add(ctx, bw.BytesRead(), o)
// For handling response bytes we leverage a callback when the client reads the http response
readRecordFunc := func(n int64) {
- t.responseBytesCounter.Add(ctx, n, addOpts...)
+ t.responseBytesCounter.Add(ctx, n, o)
}
// traces
- span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
- span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+ span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
+ span.SetStatus(t.semconv.Status(res.StatusCode))
res.Body = newWrappedBody(span, readRecordFunc, res.Body)
@@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
return res, err
}
+func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+ var attributeForRequest []attribute.KeyValue
+ if t.metricAttributesFn != nil {
+ attributeForRequest = t.metricAttributesFn(r)
+ }
+ return attributeForRequest
+}
+
// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
// io.ReadCloser. If the passed body implements io.Writer, the returned value
// will implement io.ReadWriteCloser.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index b0957f28cead6..502c1bdafc791 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.53.0"
+ return "0.54.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
deleted file mode 100644
index 948f8406c09d2..0000000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-var _ io.ReadCloser = &bodyWrapper{}
-
-// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type bodyWrapper struct {
- io.ReadCloser
- record func(n int64) // must not be nil
-
- read atomic.Int64
- err error
-}
-
-func (w *bodyWrapper) Read(b []byte) (int, error) {
- n, err := w.ReadCloser.Read(b)
- n1 := int64(n)
- w.read.Add(n1)
- w.err = err
- w.record(n1)
- return n, err
-}
-
-func (w *bodyWrapper) Close() error {
- return w.ReadCloser.Close()
-}
-
-var _ http.ResponseWriter = &respWriterWrapper{}
-
-// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the first written statusCode.
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
-// that may be useful when using it in real life situations.
-type respWriterWrapper struct {
- http.ResponseWriter
- record func(n int64) // must not be nil
-
- // used to inject the header
- ctx context.Context
-
- props propagation.TextMapPropagator
-
- written int64
- statusCode int
- err error
- wroteHeader bool
-}
-
-func (w *respWriterWrapper) Header() http.Header {
- return w.ResponseWriter.Header()
-}
-
-func (w *respWriterWrapper) Write(p []byte) (int, error) {
- if !w.wroteHeader {
- w.WriteHeader(http.StatusOK)
- }
- n, err := w.ResponseWriter.Write(p)
- n1 := int64(n)
- w.record(n1)
- w.written += n1
- w.err = err
- return n, err
-}
-
-// WriteHeader persists initial statusCode for span attribution.
-// All calls to WriteHeader will be propagated to the underlying ResponseWriter
-// and will persist the statusCode from the first call.
-// Blocking consecutive calls to WriteHeader alters expected behavior and will
-// remove warning logs from net/http where developers will notice incorrect handler implementations.
-func (w *respWriterWrapper) WriteHeader(statusCode int) {
- if !w.wroteHeader {
- w.wroteHeader = true
- w.statusCode = statusCode
- }
- w.ResponseWriter.WriteHeader(statusCode)
-}
-
-func (w *respWriterWrapper) Flush() {
- if !w.wroteHeader {
- w.WriteHeader(http.StatusOK)
- }
-
- if f, ok := w.ResponseWriter.(http.Flusher); ok {
- f.Flush()
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 6d9c8b64958b3..d9abe194d94b9 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -9,6 +9,8 @@ linters:
disable-all: true
# Specifically enable linters we want to use.
enable:
+ - asasalint
+ - bodyclose
- depguard
- errcheck
- errorlint
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index c01e6998e0b3c..6107c17b89fc9 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+
+
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+ This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+ This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+ It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
## [1.28.0/0.50.0/0.4.0] 2024-07-02
### Added
@@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Fix stale timestamps reported by the last-value aggregation. (#5517)
- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
## [1.27.0/0.49.0/0.3.0] 2024-05-21
@@ -175,7 +234,7 @@ The next release will require at least [Go 1.21].
This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
This module is in an alpha state, it is subject to breaking changes.
See our [versioning policy](./VERSIONING.md) for more info. (#4961)
-- ARM64 platform to the compatibility testing suite. (#4994)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
### Fixed
@@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
@@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+
+[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 2025549332305..5904bb7070eb7 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -5,7 +5,7 @@
#####################################################
#
# Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/main/community-membership.md
+# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
#
#
# Learn about CODEOWNERS file format:
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index b86572f58ea5a..b7402576f9832 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -650,7 +650,7 @@ should be canceled.
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
[Approver]: #approvers
[Maintainer]: #maintainers
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index f33619f76a2b9..070b1e57df10a 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-# Adding a directory will include all benchmarks in that directory if a filter is not specified.
-BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
-benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
-BENCHMARK_FILTER = .
-# You can override the filter for a particular directory by adding a rule here.
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
benchmark/%:
- @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+ @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
&& cd $* \
- $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 5a8909317312c..657df34710364 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments.
-| OS | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.22 | 386 |
-| Ubuntu | 1.21 | 386 |
-| Linux | 1.22 | arm64 |
-| Linux | 1.21 | arm64 |
-| MacOS | 1.22 | amd64 |
-| MacOS | 1.21 | amd64 |
-| Windows | 1.22 | amd64 |
-| Windows | 1.21 | amd64 |
-| Windows | 1.22 | 386 |
-| Windows | 1.21 | 386 |
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.21 | amd64 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.22 | 386 |
+| Ubuntu | 1.21 | 386 |
+| Linux | 1.23 | arm64 |
+| Linux | 1.22 | arm64 |
+| Linux | 1.21 | arm64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS 13 | 1.22 | amd64 |
+| macOS 13 | 1.21 | amd64 |
+| macOS | 1.23 | arm64 |
+| macOS | 1.22 | arm64 |
+| macOS | 1.21 | arm64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.22 | amd64 |
+| Windows | 1.21 | amd64 |
+| Windows | 1.23 | 386 |
+| Windows | 1.22 | 386 |
+| Windows | 1.21 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 940f57f3d87d1..59992984d427d 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
- Move all the `Unreleased` changes into a new section following the title scheme (`[] - `).
+ - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future.
- Update all the appropriate links at the bottom.
4. Push the changes to upstream and create a Pull Request on GitHub.
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index c40c896cc6673..b3569e95e5cad 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -44,9 +44,15 @@ type Property struct {
// NewKeyProperty returns a new Property for key.
//
+// The passed key must be valid, non-empty UTF-8 string.
// If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key.
func NewKeyProperty(key string) (Property, error) {
- if !validateKey(key) {
+ if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
// Notice: Consider using [NewKeyValuePropertyRaw] instead
// that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
if !validateValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
// NewKeyValuePropertyRaw returns a new Property for key with value.
//
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be valid, non-empty UTF-8 string.
+// The passed value must be valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
- if !validateKey(key) {
+ if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
+ if !validateBaggageValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
p := Property{
key: key,
@@ -115,12 +134,15 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !validateKey(p.key) {
+ if !validateBaggageName(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
+ if p.hasValue && !validateBaggageValue(p.value) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+ }
return nil
}
@@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
// String encodes Property into a header string compliant with the W3C Baggage
// specification.
+// It would return empty string if the key is invalid with the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
func (p Property) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(p.key) {
+ return ""
+ }
+
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
@@ -203,9 +233,14 @@ func (p properties) validate() error {
// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
- props := make([]string, len(p))
- for i, prop := range p {
- props[i] = prop.String()
+ props := make([]string, 0, len(p))
+ for _, prop := range p {
+ s := prop.String()
+
+ // Ignored empty properties.
+ if s != "" {
+ props = append(props, s)
+ }
}
return strings.Join(props, propertyDelimiter)
}
@@ -230,6 +265,10 @@ type Member struct {
// Notice: Consider using [NewMemberRaw] instead
// that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
if !validateValue(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
// NewMemberRaw returns a new Member from the passed arguments.
//
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be valid, non-empty UTF-8 string.
+// The passed value must be valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
@@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- val := strings.TrimSpace(v)
- if !validateValue(val) {
+ rawVal := strings.TrimSpace(v)
+ if !validateValue(rawVal) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
// Decode a percent-encoded value.
- value, err := url.PathUnescape(val)
+ unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
}
+
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+ if utf8.ValidString(unescapeVal) {
+ return unescapeVal
+ }
+ // W3C baggage spec:
+ // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+ var b strings.Builder
+ b.Grow(cap)
+ for i := 0; i < len(unescapeVal); {
+ r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+ if r == utf8.RuneError && size == 1 {
+ // Invalid UTF-8 sequence found, replace it with '�'
+ _, _ = b.WriteString("�")
+ } else {
+ _, _ = b.WriteRune(r)
+ }
+ i += size
+ }
+
+ return b.String()
+}
+
// validate ensures m conforms to the W3C Baggage specification.
// A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error {
@@ -314,9 +385,12 @@ func (m Member) validate() error {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
- if !validateKey(m.key) {
+ if !validateBaggageName(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
+ if !validateBaggageValue(m.value) {
+ return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+ }
return m.properties.validate()
}
@@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a header string compliant with the W3C Baggage
// specification.
+// It would return empty string if the key is invalid with the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
func (m Member) String() string {
- // A key is just an ASCII string. A value is restricted to be
- // US-ASCII characters excluding CTLs, whitespace,
- // DQUOTE, comma, semicolon, and backslash.
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(m.key) {
+ return ""
+ }
+
s := m.key + keyValueDelimiter + valueEscape(m.value)
if len(m.properties) > 0 {
s += propertyDelimiter + m.properties.String()
@@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
}
// Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
@@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
return members
}
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
@@ -528,14 +607,22 @@ func (b Baggage) Len() int {
// String encodes Baggage into a header string compliant with the W3C Baggage
// specification.
+// It would ignore members where the member key is invalid with the W3C Baggage
+// specification. This could happen for a UTF-8 key, as it may contain
+// invalid characters.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
- members = append(members, Member{
+ s := Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
- }.String())
+ }.String()
+
+ // Ignored empty members.
+ if s != "" {
+ members = append(members, s)
+ }
}
return strings.Join(members, listDelimiter)
}
@@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
}
// Decode a percent-encoded value.
- value, err := url.PathUnescape(s[valueStart:valueEnd])
+ rawVal := s[valueStart:valueEnd]
+ unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
return
}
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
ok = true
p.key = s[keyStart:keyEnd]
@@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
'~': true,
}
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// Baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// Baggage value is a valid UTF-8 strings.
+// Empty string is also a valid UTF-8 string.
+func validateBaggageValue(s string) bool {
+ return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool {
if len(s) == 0 {
return false
@@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
}
+// validateValue checks if the string is a valid W3C Baggage value.
func validateValue(s string) bool {
for _, c := range s {
if !validateValueChar(c) {
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index df29d96a6daea..2acbac3546651 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return fmt.Errorf("invalid code: %q", ci)
}
- *c = Code(ci)
+ *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
return nil
}
return fmt.Errorf("invalid code: %q", string(b))
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index 441c595014d35..921f85961ad41 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
To read more about metrics, see go.opentelemetry.io/otel/metric.
+To read more about logs, see go.opentelemetry.io/otel/log.
+
To read more about propagation, see go.opentelemetry.io/otel/propagation and
go.opentelemetry.io/otel/baggage.
*/
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index 3e7bb3b3566cd..9b1da2c02b934 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 {
}
func RawToInt64(r uint64) int64 {
- return int64(r)
+ // Assumes original was a valid int64 (overflow not checked).
+ return int64(r) // nolint: gosec
}
func Float64ToRaw(f float64) uint64 {
@@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 {
}
func RawPtrToFloat64Ptr(r *uint64) *float64 {
- return (*float64)(unsafe.Pointer(r))
+ // Assumes original was a valid *float64 (overflow not checked).
+ return (*float64)(unsafe.Pointer(r)) // nolint: gosec
}
func RawPtrToInt64Ptr(r *uint64) *int64 {
- return (*int64)(unsafe.Pointer(r))
+ // Assumes original was a valid *int64 (overflow not checked).
+ return (*int64)(unsafe.Pointer(r)) // nolint: gosec
}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 6a7991e0151cc..14e08c24a4be6 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -52,6 +52,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational
@@ -61,6 +62,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
// Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a
@@ -70,6 +72,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
// Int64Gauge returns a new Int64Gauge instrument identified by name and
// configured with options. The instrument is used to synchronously record
// instantaneous int64 measurements during a computational operation.
@@ -78,6 +81,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
@@ -92,6 +96,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record int64 measurements once per
@@ -106,6 +111,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous int64 measurements once per a
@@ -130,6 +136,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational
@@ -139,6 +146,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
// Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a
@@ -148,6 +156,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
// Float64Gauge returns a new Float64Gauge instrument identified by name and
// configured with options. The instrument is used to synchronously record
// instantaneous float64 measurements during a computational operation.
@@ -156,6 +165,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
@@ -170,6 +180,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
// Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and
// configured with options. The instrument is used to asynchronously record
@@ -184,6 +195,7 @@ type Meter interface {
// See the Instrument Name section of the package documentation for more
// information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous float64 measurements once per a
@@ -242,6 +254,7 @@ type Observer interface {
// ObserveFloat64 records the float64 value for obsrv.
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
// ObserveInt64 records the int64 value for obsrv.
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
index f4d1857c4f42f..f2cdf3c6518c9 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -4,5 +4,6 @@
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
// Library represents the instrumentation library.
-// Deprecated: please use Scope instead.
+//
+// Deprecated: use [Scope] instead.
type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
index 4f553a5715356..90a4ae16c1a66 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -31,6 +31,14 @@
// is being run on. That way when multiple instances of the code are collected
// at a single endpoint their origin is decipherable.
//
+// To avoid leaking memory, the SDK returns the same instrument for calls to
+// create new instruments with the same Name, Unit, and Description.
+// Importantly, callbacks provided using metric.WithFloat64Callback or
+// metric.WithInt64Callback will only apply for the first instrument created
+// with a given Name, Unit, and Description. Instead, use
+// Meter.RegisterCallback and Registration.Unregister to add and remove
+// callbacks without leaking memory.
+//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
//
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index c9c7e8f62a908..707342408acd2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -42,14 +42,14 @@ type expoHistogramDataPoint[N int64 | float64] struct {
noMinMax bool
noSum bool
- scale int
+ scale int32
posBuckets expoBuckets
negBuckets expoBuckets
zeroCount uint64
}
-func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
f := math.MaxFloat64
max := N(f) // if N is int64, max will overflow to -9223372036854775808
min := N(-f)
@@ -119,11 +119,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
}
// getBin returns the bin v should be recorded into.
-func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
- frac, exp := math.Frexp(v)
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+ frac, expInt := math.Frexp(v)
+ // 11-bit exponential.
+ exp := int32(expInt) // nolint: gosec
if p.scale <= 0 {
// Because of the choice of fraction is always 1 power of two higher than we want.
- correction := 1
+ var correction int32 = 1
if frac == .5 {
// If v is an exact power of two the frac will be .5 and the exp
// will be one higher than we want.
@@ -131,7 +133,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
}
return (exp - correction) >> (-p.scale)
}
- return exp<= bin {
- low = bin
- high = startBin + length - 1
+ low = int(bin)
+ high = int(startBin) + length - 1
}
- count := 0
+ var count int32
for high-low >= p.maxSize {
low = low >> 1
high = high >> 1
@@ -189,39 +191,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
// expoBuckets is a set of buckets in an exponential histogram.
type expoBuckets struct {
- startBin int
+ startBin int32
counts []uint64
}
// record increments the count for the given bin, and expands the buckets if needed.
// Size changes must be done before calling this function.
-func (b *expoBuckets) record(bin int) {
+func (b *expoBuckets) record(bin int32) {
if len(b.counts) == 0 {
b.counts = []uint64{1}
b.startBin = bin
return
}
- endBin := b.startBin + len(b.counts) - 1
+ endBin := int(b.startBin) + len(b.counts) - 1
// if the new bin is inside the current range
- if bin >= b.startBin && bin <= endBin {
+ if bin >= b.startBin && int(bin) <= endBin {
b.counts[bin-b.startBin]++
return
}
// if the new bin is before the current start add spaces to the counts
if bin < b.startBin {
origLen := len(b.counts)
- newLength := endBin - bin + 1
+ newLength := endBin - int(bin) + 1
shift := b.startBin - bin
if newLength > cap(b.counts) {
b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
}
- copy(b.counts[shift:origLen+shift], b.counts[:])
+ copy(b.counts[shift:origLen+int(shift)], b.counts[:])
b.counts = b.counts[:newLength]
- for i := 1; i < shift; i++ {
+ for i := 1; i < int(shift); i++ {
b.counts[i] = 0
}
b.startBin = bin
@@ -229,17 +231,17 @@ func (b *expoBuckets) record(bin int) {
return
}
// if the new is after the end add spaces to the end
- if bin > endBin {
- if bin-b.startBin < cap(b.counts) {
+ if int(bin) > endBin {
+ if int(bin-b.startBin) < cap(b.counts) {
b.counts = b.counts[:bin-b.startBin+1]
- for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
+ for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
b.counts[i] = 0
}
b.counts[bin-b.startBin] = 1
return
}
- end := make([]uint64, bin-b.startBin-len(b.counts)+1)
+ end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
b.counts = append(b.counts, end...)
b.counts[bin-b.startBin] = 1
}
@@ -247,7 +249,7 @@ func (b *expoBuckets) record(bin int) {
// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
// correct lower resolution bucket.
-func (b *expoBuckets) downscale(delta int) {
+func (b *expoBuckets) downscale(delta int32) {
// Example
// delta = 2
// Original offset: -6
@@ -262,19 +264,19 @@ func (b *expoBuckets) downscale(delta int) {
return
}
- steps := 1 << delta
+ steps := int32(1) << delta
offset := b.startBin % steps
offset = (offset + steps) % steps // to make offset positive
for i := 1; i < len(b.counts); i++ {
- idx := i + offset
- if idx%steps == 0 {
- b.counts[idx/steps] = b.counts[i]
+ idx := i + int(offset)
+ if idx%int(steps) == 0 {
+ b.counts[idx/int(steps)] = b.counts[i]
continue
}
- b.counts[idx/steps] += b.counts[i]
+ b.counts[idx/int(steps)] += b.counts[i]
}
- lastIdx := (len(b.counts) - 1 + offset) / steps
+ lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
b.counts = b.counts[:lastIdx+1]
b.startBin = b.startBin >> delta
}
@@ -287,7 +289,7 @@ func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMa
noSum: noSum,
noMinMax: noMinMax,
maxSize: int(maxSize),
- maxScale: int(maxScale),
+ maxScale: maxScale,
newRes: r,
limit: newLimiter[*expoHistogramDataPoint[N]](limit),
@@ -303,7 +305,7 @@ type expoHistogram[N int64 | float64] struct {
noSum bool
noMinMax bool
maxSize int
- maxScale int
+ maxScale int32
newRes func() exemplar.FilteredReservoir[N]
limit limiter[*expoHistogramDataPoint[N]]
@@ -354,15 +356,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].StartTime = e.start
hDPts[i].Time = t
hDPts[i].Count = val.count
- hDPts[i].Scale = int32(val.scale)
+ hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0
- hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
- hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
@@ -407,15 +409,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].StartTime = e.start
hDPts[i].Time = t
hDPts[i].Count = val.count
- hDPts[i].Scale = int32(val.scale)
+ hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0
- hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
- hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
index 9daf27dc00676..1957d6b1e3a37 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
@@ -42,7 +42,8 @@ func (v Value) Type() ValueType { return v.t }
// Int64ValueType, 0 is returned.
func (v Value) Int64() int64 {
if v.t == Int64ValueType {
- return int64(v.val)
+ // Assumes the correct int64 was stored in v.val based on type.
+ return int64(v.val) // nolint: gosec
}
return 0
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
index 479b7610eb16b..2309e5b2b0f8e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -185,6 +185,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
// configured with options. The instrument is used to asynchronously record
// int64 measurements once per a measurement collection cycle. Only the
// measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := Instrument{
@@ -201,6 +206,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
// configured with options. The instrument is used to asynchronously record
// instantaneous int64 measurements once per a measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := Instrument{
@@ -334,6 +344,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
// and configured with options. The instrument is used to asynchronously record
// float64 measurements once per a measurement collection cycle. Only the
// measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := Instrument{
@@ -350,6 +365,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
// configured with options. The instrument is used to asynchronously record
// instantaneous float64 measurements once per a measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := Instrument{
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
index a55f9a5372c9c..d94bdee75b731 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration")
// start of bi-directional control flow.
//
// Typically, push-based exporters that are periodic will
-// implement PeroidicExporter themselves and construct a
+// implement PeriodicExporter themselves and construct a
// PeriodicReader to satisfy this interface.
//
// Pull-based exporters will typically implement Register
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
index dade0a19a2b8a..44316caa11bba 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
- return "1.28.0"
+ return "1.29.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 33d065a7cb991..b7cede891c4c6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.28.0"
+ return "1.29.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 0000000000000..ef85cb70c6d8b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 0000000000000..d3aa476ee1254
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an in incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4ab4dff..d49adf671b954 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"bytes"
- "context"
"encoding/hex"
"encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
Remote: sc.remote,
})
}
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink
- // later, for contexts that are available during span creation, because head
- // sampling decisions can only consider information present during span creation.
- AddLink(link Link)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 0000000000000..77952d2a0b310
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf24332da..dc5e34cad0dd4 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
return ""
}
+// Walk walks all key value pairs in the TraceState by calling f
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
// Insert adds a new list-member defined by the key/value pair to the
// TraceState. If a list-member already exists for the given key, that
// list-member's value is updated. The new or updated list-member is always
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000000000..c9b7cdbbfef7e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+ echo "Error: The released sections of the changelog file have been modified."
+ diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+ rm -rf "$TEMP_DIR"
+ false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ab28960524b81..f67039ed1f90b 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.28.0"
+ return "1.29.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 241cfc82a8d0e..3ba611d71362c 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.28.0
+ version: v1.29.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -29,15 +29,16 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.50.0
+ version: v0.51.0
modules:
- go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.4.0
+ version: v0.5.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
@@ -46,4 +47,3 @@ module-sets:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
index 1ea9275b8b7ac..a01ef43577df1 100644
--- a/vendor/golang.org/x/crypto/sha3/shake.go
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
c.initBlock = append(c.initBlock, N...)
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
new file mode 100644
index 0000000000000..de58dfb8dc492
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "math"
+ "net/http"
+ "time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTPConfig documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+// - Otherwise use the http2.{Server,Transport} value.
+// - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+ MaxConcurrentStreams uint32
+ MaxDecoderHeaderTableSize uint32
+ MaxEncoderHeaderTableSize uint32
+ MaxReadFrameSize uint32
+ MaxUploadBufferPerConnection int32
+ MaxUploadBufferPerStream int32
+ SendPingTimeout time.Duration
+ PingTimeout time.Duration
+ WriteByteTimeout time.Duration
+ PermitProhibitedCipherSuites bool
+ CountError func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+ conf := http2Config{
+ MaxConcurrentStreams: h2.MaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+ MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+ CountError: h2.CountError,
+ }
+ fillNetHTTPServerConfig(&conf, h1)
+ setConfigDefaults(&conf, true)
+ return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+ conf := http2Config{
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ }
+
+ // Unlike most config fields, where out-of-range values revert to the default,
+ // Transport.MaxReadFrameSize clips.
+ if conf.MaxReadFrameSize < minMaxFrameSize {
+ conf.MaxReadFrameSize = minMaxFrameSize
+ } else if conf.MaxReadFrameSize > maxFrameSize {
+ conf.MaxReadFrameSize = maxFrameSize
+ }
+
+ if h2.t1 != nil {
+ fillNetHTTPTransportConfig(&conf, h2.t1)
+ }
+ setConfigDefaults(&conf, false)
+ return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+ if *v < minval || *v > maxval {
+ *v = defval
+ }
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+ setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+ setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ if server {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+ }
+ if server {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+ }
+ setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+ setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return n + typicalHeaders*perFieldOverhead
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 0000000000000..e3784123c81a6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+ fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+ fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 0000000000000..060fd6c64c6ca
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 003e649f30c6c..7688c356b7cba 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,8 +19,9 @@ import (
"bufio"
"context"
"crypto/tls"
+ "errors"
"fmt"
- "io"
+ "net"
"net/http"
"os"
"sort"
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
- _ incomparable
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
+ _ incomparable
+ group synctestGroupInterface // immutable
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+ return &bufferedWriter{
+ group: group,
+ conn: conn,
+ byteTimeout: timeout,
+ }
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
@@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
+ bw.Reset((*bufferedWriterTimeoutWriter)(w))
w.bw = bw
}
return w.bw.Write(p)
@@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
return err
}
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+ return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+ if timeout <= 0 {
+ return conn.Write(p)
+ }
+ for {
+ var now time.Time
+ if group == nil {
+ now = time.Now()
+ } else {
+ now = group.Now()
+ }
+ conn.SetWriteDeadline(now.Add(timeout))
+ nn, err := conn.Write(p[n:])
+ n += nn
+ if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+ // Either we finished the write, made no progress, or hit the deadline.
+ // Whichever it is, we're done now.
+ conn.SetWriteDeadline(time.Time{})
+ return n, err
+ }
+ }
+}
+
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 6c349f3ec6473..617b4a47623b2 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -29,6 +29,7 @@ import (
"bufio"
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -52,10 +53,14 @@ import (
)
const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@@ -127,6 +132,22 @@ type Server struct {
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
@@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
return timeTimer{time.AfterFunc(d, f)}
}
-func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection >= initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-
-func (s *Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-func (s *Server) maxDecoderHeaderTableSize() uint32 {
- if v := s.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (s *Server) maxEncoderHeaderTableSize() uint32 {
- if v := s.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return maxQueuedControlFrames
-}
-
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
@@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
- hs: opts.baseConfig(),
+ hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
+ bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
+ advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
// So for now, do nothing here again.
}
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
opts.UpgradeRequest = nil
}
- sc.serve()
+ sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@@ -609,6 +584,7 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
+ countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -628,6 +604,7 @@ type serverConn struct {
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@@ -638,9 +615,14 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
goAwayCode ErrCode
shutdownTimer timer // nil until used
idleTimer timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
}
}
-func (sc *serverConn) serve() {
+func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
sc.writeFrame(FrameWriteRequest{
write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
- {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
defer sc.idleTimer.Stop()
}
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
+ lastFrameTime := sc.srv.now()
loopNum := 0
for {
loopNum++
@@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
}
}
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.vlogf("timeout waiting for PING response")
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := sc.srv.now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+ // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
+ // is we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@@ -1068,6 +1083,7 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false
sc.writingFrameAsync = false
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
wr := res.wr
if writeEndsStream(wr.write) {
@@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
- f := sc.srv.CountError
+ f := sc.countErrorFunc
if f == nil {
return err
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 61f511f97aa44..0c5f64aa8bef7 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "os"
"sort"
"strconv"
"strings"
@@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
}
func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
+ n := int64(t.MaxHeaderListSize)
+ if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+ n = t.t1.MaxResponseHeaderBytes
+ if n > 0 {
+ n = adjustHTTP1MaxHeaderSize(n)
+ }
+ }
+ if n <= 0 {
return 10 << 20
}
- if t.MaxHeaderListSize == 0xffffffff {
+ if n >= 0xffffffff {
return 0
}
- return t.MaxHeaderListSize
-}
-
-func (t *Transport) maxFrameReadSize() uint32 {
- if t.MaxReadFrameSize == 0 {
- return 0 // use the default provided by the peer
- }
- if t.MaxReadFrameSize < minMaxFrameSize {
- return minMaxFrameSize
- }
- if t.MaxReadFrameSize > maxFrameSize {
- return maxFrameSize
- }
- return t.MaxReadFrameSize
+ return uint32(n)
}
func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
-func (t *Transport) pingTimeout() time.Duration {
- if t.PingTimeout == 0 {
- return 15 * time.Second
- }
- return t.PingTimeout
-
-}
-
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
@@ -370,11 +355,14 @@ type ClientConn struct {
lastActive time.Time
lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- peerMaxHeaderTableSize uint32
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+ initialStreamRecvWindowSize int32
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
}
type stickyErrWriter struct {
+ group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- for {
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
- }
- nn, err := sew.conn.Write(p[n:])
- n += nn
- if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
- // Keep extending the deadline so long as we're making progress.
- continue
- }
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Time{})
- }
- *sew.err = err
- return n, err
- }
+ n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ *sew.err = err
+ return n, err
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
-func (t *Transport) maxDecoderHeaderTableSize() uint32 {
- if v := t.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (t *Transport) maxEncoderHeaderTableSize() uint32 {
- if v := t.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ conf := configFromTransport(t)
cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
- streams: make(map[uint32]*clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
- pings: make(map[[8]byte]chan struct{}),
- reqHeaderMu: make(chan struct{}, 1),
- }
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ readIdleTimeout: conf.SendPingTimeout,
+ pingTimeout: conf.PingTimeout,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ }
+ var group synctestGroupInterface
if t.transportTestHooks != nil {
t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
+ group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
+ group: group,
conn: c,
- timeout: t.WriteByteTimeout,
+ timeout: conf.WriteByteTimeout,
err: &cc.werr,
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
- if t.maxFrameReadSize() != 0 {
- cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
- }
+ cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
if t.CountError != nil {
cc.fr.countError = t.CountError
}
- maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf)
- cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
cc.peerMaxHeaderTableSize = initialHeaderTableSize
if cs, ok := c.(connectionStater); ok {
@@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0},
- {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxFrameReadSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+ {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
}
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
@@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
+ cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+ cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
}
func (cc *ClientConn) healthCheck() {
- pingTimeout := cc.t.pingTimeout()
+ pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
@@ -2199,7 +2164,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.init(transportDefaultStreamFlow)
+ cs.inflow.init(cc.initialStreamRecvWindowSize)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
- readIdleTimeout := cc.t.ReadIdleTimeout
+ readIdleTimeout := cc.readIdleTimeout
var t timer
if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 33f61398a1236..6ff6bee7e9549 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+type writePing struct {
+ data [8]byte
+}
+
+func (w writePing) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(false, w.data)
+}
+
+func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5bbb332174885..109997d77cea5 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
index 7d3c060e12213..6e08a76a716e9 100644
--- a/vendor/golang.org/x/sys/unix/README.md
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
-1. Construct the set of common code that is idential in all architecture-specific files.
+1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index e14b766a32c5f..ac54ecaba0a4b 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -656,7 +656,7 @@ errors=$(
signals=$(
echo '#include ' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
@@ -666,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags |
sort >_error.grep
echo '#include ' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 67ce6cef2d5c4..6f15ba1eaff65 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int
var r Pid_t
err = ERESTART
- // AIX wait4 may return with ERESTART errno, while the processus is still
+ // AIX wait4 may return with ERESTART errno, while the process is still
// active.
for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 3f1d3d4cb2560..f08abd434ff47 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err
}
+// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
-//sys Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+ vdsoRet, supported := vgetrandom(buf, uint32(flags))
+ if supported {
+ if vdsoRet < 0 {
+ return 0, errnoErr(syscall.Errno(-vdsoRet))
+ }
+ return vdsoRet, nil
+ }
+ var p *byte
+ if len(buf) > 0 {
+ p = &buf[0]
+ }
+ r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+ if e != 0 {
+ return 0, errnoErr(e)
+ }
+ return int(r), nil
+}
+
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c75ef3d..745e5c7e6c0d5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e98451f8a7..dd2262a40799a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a288944dfe..8cf3670bda630 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
}
return riscvHWProbe(pairs, setSize, set, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
new file mode 100644
index 0000000000000..07ac8e09d1b70
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && go1.24
+
+package unix
+
+import _ "unsafe"
+
+//go:linkname vgetrandom runtime.vgetrandom
+//go:noescape
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
new file mode 100644
index 0000000000000..297e97bce92a6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux || !go1.24
+
+package unix
+
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
+ return -1, false
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 01a70b24638e6..de3b462489c0b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -495,6 +495,7 @@ const (
BPF_F_TEST_REG_INVARIANTS = 0x80
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1
+ BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
BPF_F_XDP_DEV_BOUND_ONLY = 0x40
@@ -1922,6 +1923,7 @@ const (
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
MNT_ID_REQ_SIZE_VER0 = 0x18
+ MNT_ID_REQ_SIZE_VER1 = 0x20
MODULE_INIT_COMPRESSED_FILE = 0x4
MODULE_INIT_IGNORE_MODVERSIONS = 0x1
MODULE_INIT_IGNORE_VERMAGIC = 0x2
@@ -2187,7 +2189,7 @@ const (
NFT_REG_SIZE = 0x10
NFT_REJECT_ICMPX_MAX = 0x3
NFT_RT_MAX = 0x4
- NFT_SECMARK_CTX_MAXLEN = 0x100
+ NFT_SECMARK_CTX_MAXLEN = 0x1000
NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3
NFT_TABLE_F_MASK = 0x7
@@ -2356,9 +2358,11 @@ const (
PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2
+ PERF_MEM_LVLNUM_L2_MHB = 0x5
PERF_MEM_LVLNUM_L3 = 0x3
PERF_MEM_LVLNUM_L4 = 0x4
PERF_MEM_LVLNUM_LFB = 0xc
+ PERF_MEM_LVLNUM_MSC = 0x6
PERF_MEM_LVLNUM_NA = 0xf
PERF_MEM_LVLNUM_PMEM = 0xe
PERF_MEM_LVLNUM_RAM = 0xd
@@ -2431,6 +2435,7 @@ const (
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
+ PROCFS_IOCTL_MAGIC = 'f'
PROC_SUPER_MAGIC = 0x9fa0
PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000
@@ -2933,11 +2938,12 @@ const (
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
+ RWF_ATOMIC = 0x40
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x3f
+ RWF_SUPPORTED = 0x7f
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3210,6 +3216,7 @@ const (
STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
+ STATX_ATTR_WRITE_ATOMIC = 0x400000
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
@@ -3226,6 +3233,7 @@ const (
STATX_SUBVOL = 0x8000
STATX_TYPE = 0x1
STATX_UID = 0x8
+ STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
@@ -3624,6 +3632,7 @@ const (
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
+ XDP_UMEM_TX_METADATA_LEN = 0x4
XDP_UMEM_TX_SW_CSUM = 0x2
XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1
XDP_USE_NEED_WAKEUP = 0x8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 684a5168dac4e..8aa6d77c0184e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -153,9 +153,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 61d74b592d686..da428f4253398 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -153,9 +153,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index a28c9e3e893ad..bf45bfec78a53 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index ab5d1fe8ead78..71c67162b737e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index c523090e7c17e..9476628fa02b8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 01e6ea7804b12..b9e85f3cf0c05 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 7aa610b1e717b..a48b68a7647ef 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 92af771b44a35..ea00e8522a159 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index b27ef5e6f1195..91c64687176a9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 237a2cefb3e5a..8cbf38d639016 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 4a5c555a36e2b..a2df7341917ec 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index a02fb49a5f8ad..2479137923331 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index e26a7c61b2b6f..d265f146ee016 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index c48f7c2103b81..3f2d6443964ff 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index ad4b9aace7bb6..5d8b727a1c837 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -155,9 +155,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 1bc1a5adb25fd..af30da5578031 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getrandom(buf []byte, flags int) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index d3e38f681ab03..f485dbf456567 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -341,6 +341,7 @@ const (
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
+ SYS_URETPROBE = 335
SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 6c778c23278f9..1893e2fe88404 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -85,7 +85,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 37281cf51a80b..16a4017da0ab2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -84,6 +84,8 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
+ SYS_NEWFSTATAT = 79
+ SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 9889f6a5591b6..a5459e766f59d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -84,7 +84,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 9f2550dc3120d..3a69e45496268 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -87,31 +87,35 @@ type StatxTimestamp struct {
}
type Statx_t struct {
- Mask uint32
- Blksize uint32
- Attributes uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Mode uint16
- _ [1]uint16
- Ino uint64
- Size uint64
- Blocks uint64
- Attributes_mask uint64
- Atime StatxTimestamp
- Btime StatxTimestamp
- Ctime StatxTimestamp
- Mtime StatxTimestamp
- Rdev_major uint32
- Rdev_minor uint32
- Dev_major uint32
- Dev_minor uint32
- Mnt_id uint64
- Dio_mem_align uint32
- Dio_offset_align uint32
- Subvol uint64
- _ [11]uint64
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ Mnt_id uint64
+ Dio_mem_align uint32
+ Dio_offset_align uint32
+ Subvol uint64
+ Atomic_write_unit_min uint32
+ Atomic_write_unit_max uint32
+ Atomic_write_segments_max uint32
+ _ [1]uint32
+ _ [9]uint64
}
type Fsid struct {
@@ -516,6 +520,29 @@ type TCPInfo struct {
Total_rto_time uint32
}
+type TCPVegasInfo struct {
+ Enabled uint32
+ Rttcnt uint32
+ Rtt uint32
+ Minrtt uint32
+}
+
+type TCPDCTCPInfo struct {
+ Enabled uint16
+ Ce_state uint16
+ Alpha uint32
+ Ab_ecn uint32
+ Ab_tot uint32
+}
+
+type TCPBBRInfo struct {
+ Bw_lo uint32
+ Bw_hi uint32
+ Min_rtt uint32
+ Pacing_gain uint32
+ Cwnd_gain uint32
+}
+
type CanFilter struct {
Id uint32
Mask uint32
@@ -557,6 +584,7 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0xf8
+ SizeofTCPCCInfo = 0x14
SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8
)
@@ -3766,7 +3794,7 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2b
+ ETHTOOL_MSG_USER_MAX = 0x2c
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3806,7 +3834,7 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2b
+ ETHTOOL_MSG_KERNEL_MAX = 0x2c
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@@ -3951,7 +3979,7 @@ const (
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19
- ETHTOOL_A_COALESCE_MAX = 0x1c
+ ETHTOOL_A_COALESCE_MAX = 0x1e
ETHTOOL_A_PAUSE_UNSPEC = 0x0
ETHTOOL_A_PAUSE_HEADER = 0x1
ETHTOOL_A_PAUSE_AUTONEG = 0x2
@@ -4609,7 +4637,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14a
+ NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5213,7 +5241,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x20
+ NL80211_FREQUENCY_ATTR_MAX = 0x21
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 115341fba66da..4e613cf6335ce 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) {
return d, nil
}
-// MustLoadDLL is like LoadDLL but panics if load operation failes.
+// MustLoadDLL is like LoadDLL but panics if load operation fails.
func MustLoadDLL(name string) *DLL {
d, e := LoadDLL(name)
if e != nil {
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f493f8e9..93a798ab63704 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json
index 7699dd712d635..7ce5ff86ac875 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-api.json
+++ b/vendor/google.golang.org/api/compute/v1/compute-api.json
@@ -1915,7 +1915,7 @@
]
},
"listUsable": {
- "description": "Retrieves an aggregated list of all usable backend services in the specified project.",
+ "description": "Retrieves a list of all usable backend services in the specified project.",
"flatPath": "projects/{project}/global/backendServices/listUsable",
"httpMethod": "GET",
"id": "compute.backendServices.listUsable",
@@ -4814,364 +4814,6 @@
}
}
},
- "futureReservations": {
- "methods": {
- "aggregatedList": {
- "description": "Retrieves an aggregated list of future reservations. To prevent failure, Google recommends that you set the `returnPartialSuccess` parameter to `true`.",
- "flatPath": "projects/{project}/aggregated/futureReservations",
- "httpMethod": "GET",
- "id": "compute.futureReservations.aggregatedList",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "filter": {
- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- "location": "query",
- "type": "string"
- },
- "includeAllScopes": {
- "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "500",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "orderBy": {
- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- "location": "query",
- "type": "string"
- },
- "pageToken": {
- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "returnPartialSuccess": {
- "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.",
- "location": "query",
- "type": "boolean"
- },
- "serviceProjectNumber": {
- "description": "The Shared VPC service project id or service project number for which aggregated list request is invoked for subnetworks list-usable api.",
- "format": "int64",
- "location": "query",
- "type": "string"
- }
- },
- "path": "projects/{project}/aggregated/futureReservations",
- "response": {
- "$ref": "FutureReservationsAggregatedListResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "cancel": {
- "description": "Cancel the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel",
- "httpMethod": "POST",
- "id": "compute.futureReservations.cancel",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "delete": {
- "description": "Deletes the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "DELETE",
- "id": "compute.futureReservations.delete",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "get": {
- "description": "Retrieves information about the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "GET",
- "id": "compute.futureReservations.get",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "response": {
- "$ref": "FutureReservation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "insert": {
- "description": "Creates a new Future Reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations",
- "httpMethod": "POST",
- "id": "compute.futureReservations.insert",
- "parameterOrder": [
- "project",
- "zone"
- ],
- "parameters": {
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations",
- "request": {
- "$ref": "FutureReservation"
- },
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "list": {
- "description": "A list of all the future reservations that have been configured for the specified project in specified zone.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations",
- "httpMethod": "GET",
- "id": "compute.futureReservations.list",
- "parameterOrder": [
- "project",
- "zone"
- ],
- "parameters": {
- "filter": {
- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- "location": "query",
- "type": "string"
- },
- "maxResults": {
- "default": "500",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "orderBy": {
- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- "location": "query",
- "type": "string"
- },
- "pageToken": {
- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "returnPartialSuccess": {
- "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.",
- "location": "query",
- "type": "boolean"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations",
- "response": {
- "$ref": "FutureReservationsListResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "update": {
- "description": "Updates the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "PATCH",
- "id": "compute.futureReservations.update",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the reservation to update. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "updateMask": {
- "description": "update_mask indicates fields to be updated as part of this request.",
- "format": "google-fieldmask",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "request": {
- "$ref": "FutureReservation"
- },
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- }
- }
- },
"globalAddresses": {
"methods": {
"delete": {
@@ -20638,7 +20280,7 @@
]
},
"listUsable": {
- "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.",
+ "description": "Retrieves a list of all usable backend services in the specified project in the given region.",
"flatPath": "projects/{project}/regions/{region}/backendServices/listUsable",
"httpMethod": "GET",
"id": "compute.regionBackendServices.listUsable",
@@ -37779,7 +37421,7 @@
}
}
},
- "revision": "20240813",
+ "revision": "20241001",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AWSV4Signature": {
@@ -39033,6 +38675,10 @@
"format": "int32",
"type": "integer"
},
+ "turboMode": {
+ "description": "Turbo frequency mode to use for the instance. Supported modes include: * ALL_CORE_MAX Using empty string or not setting this field will use the platform-specific default turbo mode.",
+ "type": "string"
+ },
"visibleCoreCount": {
"description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.",
"format": "int32",
@@ -39504,13 +39150,6 @@
},
"type": "array"
},
- "exemptedMembers": {
- "description": "This is deprecated and has no effect. Do not use.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
"service": {
"description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.",
"type": "string"
@@ -39529,10 +39168,6 @@
},
"type": "array"
},
- "ignoreChildExemptions": {
- "description": "This is deprecated and has no effect. Do not use.",
- "type": "boolean"
- },
"logType": {
"description": "The log type that this config enables.",
"enum": [
@@ -40484,6 +40119,13 @@
"selfLink": {
"description": "[Output Only] Server-defined URL for the resource.",
"type": "string"
+ },
+ "usedBy": {
+ "description": "[Output Only] List of resources referencing that backend bucket.",
+ "items": {
+ "$ref": "BackendBucketUsedBy"
+ },
+ "type": "array"
}
},
"type": "object"
@@ -40770,6 +40412,16 @@
},
"type": "object"
},
+ "BackendBucketUsedBy": {
+ "id": "BackendBucketUsedBy",
+ "properties": {
+ "reference": {
+ "description": "[Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"BackendService": {
"description": "Represents a Backend Service resource. A backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly. Backend services in Google Compute Engine can be either regionally or globally scoped. * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) For more information, see Backend Services.",
"id": "BackendService",
@@ -40871,6 +40523,22 @@
"format": "uint64",
"type": "string"
},
+ "ipAddressSelectionPolicy": {
+ "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ",
+ "enum": [
+ "IPV4_ONLY",
+ "IPV6_ONLY",
+ "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED",
+ "PREFER_IPV6"
+ ],
+ "enumDescriptions": [
+ "Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting.",
+ "Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends.",
+ "Unspecified IP address selection policy.",
+ "Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address)."
+ ],
+ "type": "string"
+ },
"kind": {
"default": "compute#backendService",
"description": "[Output Only] Type of resource. Always compute#backendService for backend services.",
@@ -42287,7 +41955,7 @@
},
"locationPolicy": {
"$ref": "LocationPolicy",
- "description": "Policy for chosing target zone. For more information, see Create VMs in bulk ."
+ "description": "Policy for choosing target zone. For more information, see Create VMs in bulk."
},
"minCount": {
"description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.",
@@ -42487,6 +42155,10 @@
"description": "[Output Only] Creation timestamp in RFC3339 text format.",
"type": "string"
},
+ "customEndTimestamp": {
+ "description": "[Input Only] Optional, specifies the CUD end time requested by the customer in RFC3339 text format. Needed when the customer wants CUD's end date is later than the start date + term duration.",
+ "type": "string"
+ },
"description": {
"description": "An optional description of this resource. Provide this property when you create the resource.",
"type": "string"
@@ -42553,6 +42225,10 @@
},
"type": "array"
},
+ "resourceStatus": {
+ "$ref": "CommitmentResourceStatus",
+ "description": "[Output Only] Status information for Commitment resource."
+ },
"resources": {
"description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.",
"items": {
@@ -42956,6 +42632,17 @@
},
"type": "object"
},
+ "CommitmentResourceStatus": {
+ "description": "[Output Only] Contains output only fields.",
+ "id": "CommitmentResourceStatus",
+ "properties": {
+ "customTermEligibilityEndTimestamp": {
+ "description": "[Output Only] Indicates the end time of customer's eligibility to send custom term requests in RFC3339 text format. Term extension requests that (not the end time in the request) after this time will be rejected.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"CommitmentsScopedList": {
"id": "CommitmentsScopedList",
"properties": {
@@ -43179,12 +42866,14 @@
"enum": [
"CONFIDENTIAL_INSTANCE_TYPE_UNSPECIFIED",
"SEV",
- "SEV_SNP"
+ "SEV_SNP",
+ "TDX"
],
"enumDescriptions": [
"No type specified. Do not use this value.",
"AMD Secure Encrypted Virtualization.",
- "AMD Secure Encrypted Virtualization - Secure Nested Paging."
+ "AMD Secure Encrypted Virtualization - Secure Nested Paging.",
+ "Intel Trust Domain eXtension."
],
"type": "string"
},
@@ -46975,781 +46664,6 @@
},
"type": "object"
},
- "FutureReservation": {
- "id": "FutureReservation",
- "properties": {
- "autoCreatedReservationsDeleteTime": {
- "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.",
- "type": "string"
- },
- "autoCreatedReservationsDuration": {
- "$ref": "Duration",
- "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution."
- },
- "autoDeleteAutoCreatedReservations": {
- "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. For keeping auto-created reservation indefinitely, this value should be set to false.",
- "type": "boolean"
- },
- "creationTimestamp": {
- "description": "[Output Only] The creation timestamp for this future reservation in RFC3339 text format.",
- "type": "string"
- },
- "description": {
- "description": "An optional description of this resource. Provide this property when you create the future reservation.",
- "type": "string"
- },
- "id": {
- "description": "[Output Only] A unique identifier for this future reservation. The server defines this identifier.",
- "format": "uint64",
- "type": "string"
- },
- "kind": {
- "default": "compute#futureReservation",
- "description": "[Output Only] Type of the resource. Always compute#futureReservation for future reservations.",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "compute.instances.insert"
- ]
- },
- "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.",
- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- "type": "string"
- },
- "namePrefix": {
- "description": "Name prefix for the reservations to be created at the time of delivery. The name prefix must comply with RFC1035. Maximum allowed length for name prefix is 20. Automatically created reservations name format will be -date-####.",
- "type": "string"
- },
- "planningStatus": {
- "description": "Planning state before being submitted for evaluation",
- "enum": [
- "DRAFT",
- "PLANNING_STATUS_UNSPECIFIED",
- "SUBMITTED"
- ],
- "enumDescriptions": [
- "Future Reservation is being drafted.",
- "",
- "Future Reservation has been submitted for evaluation by GCP."
- ],
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined fully-qualified URL for this resource.",
- "type": "string"
- },
- "selfLinkWithId": {
- "description": "[Output Only] Server-defined URL for this resource with the resource id.",
- "type": "string"
- },
- "shareSettings": {
- "$ref": "ShareSettings",
- "description": "List of Projects/Folders to share with."
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationSpecificSKUProperties",
- "description": "Future Reservation configuration to indicate instance properties and total count."
- },
- "status": {
- "$ref": "FutureReservationStatus",
- "description": "[Output only] Status of the Future Reservation"
- },
- "timeWindow": {
- "$ref": "FutureReservationTimeWindow",
- "description": "Time window for this Future Reservation."
- },
- "zone": {
- "description": "[Output Only] URL of the Zone where this future reservation resides.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationSpecificSKUProperties": {
- "id": "FutureReservationSpecificSKUProperties",
- "properties": {
- "instanceProperties": {
- "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties",
- "description": "Properties of the SKU instances being reserved."
- },
- "sourceInstanceTemplate": {
- "description": "The instance template that will be used to populate the ReservedInstanceProperties of the future reservation",
- "type": "string"
- },
- "totalCount": {
- "description": "Total number of instances for which capacity assurance is requested at a future time period.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatus": {
- "description": "[Output only] Represents status related to the future reservation.",
- "id": "FutureReservationStatus",
- "properties": {
- "amendmentStatus": {
- "description": "[Output Only] The current status of the requested amendment.",
- "enum": [
- "AMENDMENT_APPROVED",
- "AMENDMENT_DECLINED",
- "AMENDMENT_IN_REVIEW",
- "AMENDMENT_STATUS_UNSPECIFIED"
- ],
- "enumDescriptions": [
- "The requested amendment to the Future Resevation has been approved and applied by GCP.",
- "The requested amendment to the Future Reservation has been declined by GCP and the original state was restored.",
- "The requested amendment to the Future Reservation is currently being reviewd by GCP.",
- ""
- ],
- "type": "string"
- },
- "autoCreatedReservations": {
- "description": "Fully qualified urls of the automatically created reservations at start_time.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "existingMatchingUsageInfo": {
- "$ref": "FutureReservationStatusExistingMatchingUsageInfo",
- "description": "[Output Only] Represents the existing matching usage for the future reservation."
- },
- "fulfilledCount": {
- "description": "This count indicates the fulfilled capacity so far. This is set during \"PROVISIONING\" state. This count also includes capacity delivered as part of existing matching reservations.",
- "format": "int64",
- "type": "string"
- },
- "lastKnownGoodState": {
- "$ref": "FutureReservationStatusLastKnownGoodState",
- "description": "[Output Only] This field represents the future reservation before an amendment was requested. If the amendment is declined, the Future Reservation will be reverted to the last known good state. The last known good state is not set when updating a future reservation whose Procurement Status is DRAFTING."
- },
- "lockTime": {
- "description": "Time when Future Reservation would become LOCKED, after which no modifications to Future Reservation will be allowed. Applicable only after the Future Reservation is in the APPROVED state. The lock_time is an RFC3339 string. The procurement_status will transition to PROCURING state at this time.",
- "type": "string"
- },
- "procurementStatus": {
- "description": "Current state of this Future Reservation",
- "enum": [
- "APPROVED",
- "CANCELLED",
- "COMMITTED",
- "DECLINED",
- "DRAFTING",
- "FAILED",
- "FAILED_PARTIALLY_FULFILLED",
- "FULFILLED",
- "PENDING_AMENDMENT_APPROVAL",
- "PENDING_APPROVAL",
- "PROCUREMENT_STATUS_UNSPECIFIED",
- "PROCURING",
- "PROVISIONING"
- ],
- "enumDescriptions": [
- "Future reservation is approved by GCP.",
- "Future reservation is cancelled by the customer.",
- "Future reservation is committed by the customer.",
- "Future reservation is rejected by GCP.",
- "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.",
- "Future reservation failed. No additional reservations were provided.",
- "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.",
- "Future reservation is fulfilled completely.",
- "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.",
- "Future reservation is pending approval by GCP.",
- "",
- "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.",
- "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h."
- ],
- "type": "string"
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationStatusSpecificSKUProperties"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusExistingMatchingUsageInfo": {
- "description": "[Output Only] Represents the existing matching usage for the future reservation.",
- "id": "FutureReservationStatusExistingMatchingUsageInfo",
- "properties": {
- "count": {
- "description": "Count to represent min(FR total_count, matching_reserved_capacity+matching_unreserved_instances)",
- "format": "int64",
- "type": "string"
- },
- "timestamp": {
- "description": "Timestamp when the matching usage was calculated",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusLastKnownGoodState": {
- "description": "The state that the future reservation will be reverted to should the amendment be declined.",
- "id": "FutureReservationStatusLastKnownGoodState",
- "properties": {
- "description": {
- "description": "[Output Only] The description of the FutureReservation before an amendment was requested.",
- "type": "string"
- },
- "existingMatchingUsageInfo": {
- "$ref": "FutureReservationStatusExistingMatchingUsageInfo",
- "description": "[Output Only] Represents the matching usage for the future reservation before an amendment was requested."
- },
- "futureReservationSpecs": {
- "$ref": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs"
- },
- "lockTime": {
- "description": "[Output Only] The lock time of the FutureReservation before an amendment was requested.",
- "type": "string"
- },
- "namePrefix": {
- "description": "[Output Only] The name prefix of the Future Reservation before an amendment was requested.",
- "type": "string"
- },
- "procurementStatus": {
- "description": "[Output Only] The status of the last known good state for the Future Reservation.",
- "enum": [
- "APPROVED",
- "CANCELLED",
- "COMMITTED",
- "DECLINED",
- "DRAFTING",
- "FAILED",
- "FAILED_PARTIALLY_FULFILLED",
- "FULFILLED",
- "PENDING_AMENDMENT_APPROVAL",
- "PENDING_APPROVAL",
- "PROCUREMENT_STATUS_UNSPECIFIED",
- "PROCURING",
- "PROVISIONING"
- ],
- "enumDescriptions": [
- "Future reservation is approved by GCP.",
- "Future reservation is cancelled by the customer.",
- "Future reservation is committed by the customer.",
- "Future reservation is rejected by GCP.",
- "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.",
- "Future reservation failed. No additional reservations were provided.",
- "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.",
- "Future reservation is fulfilled completely.",
- "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.",
- "Future reservation is pending approval by GCP.",
- "",
- "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.",
- "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h."
- ],
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs": {
- "description": "The properties of the last known good state for the Future Reservation.",
- "id": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs",
- "properties": {
- "shareSettings": {
- "$ref": "ShareSettings",
- "description": "[Output Only] The previous share settings of the Future Reservation."
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationSpecificSKUProperties",
- "description": "[Output Only] The previous instance related properties of the Future Reservation."
- },
- "timeWindow": {
- "$ref": "FutureReservationTimeWindow",
- "description": "[Output Only] The previous time window of the Future Reservation."
- }
- },
- "type": "object"
- },
- "FutureReservationStatusSpecificSKUProperties": {
- "description": "Properties to be set for the Future Reservation.",
- "id": "FutureReservationStatusSpecificSKUProperties",
- "properties": {
- "sourceInstanceTemplateId": {
- "description": "ID of the instance template used to populate the Future Reservation properties.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationTimeWindow": {
- "id": "FutureReservationTimeWindow",
- "properties": {
- "duration": {
- "$ref": "Duration"
- },
- "endTime": {
- "type": "string"
- },
- "startTime": {
- "description": "Start time of the Future Reservation. The start_time is an RFC3339 string.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationsAggregatedListResponse": {
- "description": "Contains a list of future reservations.",
- "id": "FutureReservationsAggregatedListResponse",
- "properties": {
- "etag": {
- "type": "string"
- },
- "id": {
- "description": "[Output Only] Unique identifier for the resource; defined by the server.",
- "type": "string"
- },
- "items": {
- "additionalProperties": {
- "$ref": "FutureReservationsScopedList",
- "description": "Name of the scope containing this set of future reservations."
- },
- "description": "A list of Future reservation resources.",
- "type": "object"
- },
- "kind": {
- "default": "compute#futureReservationsAggregatedListResponse",
- "description": "[Output Only] Type of resource. Always compute#futureReservationsAggregatedListResponse for future resevation aggregated list response.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.",
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined URL for this resource.",
- "type": "string"
- },
- "unreachables": {
- "description": "[Output Only] Unreachable resources.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "warning": {
- "description": "[Output Only] Informational warning message.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "FutureReservationsListResponse": {
- "id": "FutureReservationsListResponse",
- "properties": {
- "etag": {
- "type": "string"
- },
- "id": {
- "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.",
- "type": "string"
- },
- "items": {
- "description": "[Output Only] A list of future reservation resources.",
- "items": {
- "$ref": "FutureReservation"
- },
- "type": "array"
- },
- "kind": {
- "default": "compute#futureReservationsListResponse",
- "description": "[Output Only] Type of resource.Always compute#FutureReservationsListResponse for lists of reservations",
- "type": "string"
- },
- "nextPageToken": {
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.",
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined URL for this resource.",
- "type": "string"
- },
- "unreachables": {
- "description": "[Output Only] Unreachable resources.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "warning": {
- "description": "[Output Only] Informational warning message.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "FutureReservationsScopedList": {
- "id": "FutureReservationsScopedList",
- "properties": {
- "futureReservations": {
- "description": "A list of future reservations contained in this scope.",
- "items": {
- "$ref": "FutureReservation"
- },
- "type": "array"
- },
- "warning": {
- "description": "Informational warning which replaces the list of future reservations when the list is empty.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
"GRPCHealthCheck": {
"id": "GRPCHealthCheck",
"properties": {
@@ -47965,6 +46879,7 @@
"SEV_LIVE_MIGRATABLE",
"SEV_LIVE_MIGRATABLE_V2",
"SEV_SNP_CAPABLE",
+ "TDX_CAPABLE",
"UEFI_COMPATIBLE",
"VIRTIO_SCSI_MULTIQUEUE",
"WINDOWS"
@@ -47981,6 +46896,7 @@
"",
"",
"",
+ "",
""
],
"type": "string"
@@ -48997,6 +47913,21 @@
"description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.",
"type": "string"
},
+ "ipv6Address": {
+ "type": "string"
+ },
+ "ipv6HealthState": {
+ "description": "Health state of the IPv6 address of the instance.",
+ "enum": [
+ "HEALTHY",
+ "UNHEALTHY"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ],
+ "type": "string"
+ },
"port": {
"description": "The named port of the instance group, not necessarily the port that is health-checked.",
"format": "int32",
@@ -55222,7 +54153,7 @@
"type": "boolean"
},
"availableFeatures": {
- "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.",
+ "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - IF_MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.",
"items": {
"enum": [
"IF_MACSEC"
@@ -55375,7 +54306,7 @@
"type": "string"
},
"requestedFeatures": {
- "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.",
+ "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - IF_MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.",
"items": {
"enum": [
"IF_MACSEC"
@@ -60154,6 +59085,11 @@
"description": "Metadata defined as annotations on the network endpoint.",
"type": "object"
},
+ "clientDestinationPort": {
+ "description": "Represents the port number to which PSC consumer sends packets. Only valid for network endpoint groups created with GCE_VM_IP_PORTMAP endpoint type.",
+ "format": "int32",
+ "type": "integer"
+ },
"fqdn": {
"description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.",
"type": "string"
@@ -60233,6 +59169,7 @@
"enum": [
"GCE_VM_IP",
"GCE_VM_IP_PORT",
+ "GCE_VM_IP_PORTMAP",
"INTERNET_FQDN_PORT",
"INTERNET_IP_PORT",
"NON_GCP_PRIVATE_IP_PORT",
@@ -60242,6 +59179,7 @@
"enumDescriptions": [
"The network endpoint is represented by an IP address.",
"The network endpoint is represented by IP address and port pair.",
+ "The network endpoint is represented by an IP, Port and Client Destination Port.",
"The network endpoint is represented by fully qualified domain name and port.",
"The network endpoint is represented by an internet IP address and port.",
"The network endpoint is represented by an IP address and port. The endpoint belongs to a VM or pod running in a customer's on-premises.",
@@ -60652,6 +59590,11 @@
"description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.",
"type": "string"
},
+ "producerPort": {
+ "description": "The psc producer port is used to connect PSC NEG with specific port on the PSC Producer side; should only be used for the PRIVATE_SERVICE_CONNECT NEG type",
+ "format": "int32",
+ "type": "integer"
+ },
"pscConnectionId": {
"description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.",
"format": "uint64",
@@ -61119,11 +60062,13 @@
"description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"The network interface can have both IPv4 and IPv6 addresses.",
- "The network interface will be assigned IPv4 address."
+ "The network interface will only be assigned IPv4 addresses.",
+ "The network interface will only be assigned IPv6 addresses."
],
"type": "string"
},
@@ -61453,6 +60398,11 @@
"description": "[Output Only] The name of the firewall policy.",
"type": "string"
},
+ "priority": {
+ "description": "[Output only] Priority of firewall policy association. Not applicable for type=HIERARCHY.",
+ "format": "int32",
+ "type": "integer"
+ },
"rules": {
"description": "The rules that apply to the network.",
"items": {
@@ -61469,9 +60419,11 @@
"enum": [
"HIERARCHY",
"NETWORK",
+ "SYSTEM",
"UNSPECIFIED"
],
"enumDescriptions": [
+ "",
"",
"",
""
@@ -63852,7 +62804,7 @@
"type": "string"
},
"targetLink": {
- "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.",
+ "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.",
"type": "string"
},
"user": {
@@ -73997,6 +72949,11 @@
"description": "The URL of a forwarding rule with loadBalancingScheme INTERNAL* that is serving the endpoint identified by this service attachment.",
"type": "string"
},
+ "propagatedConnectionLimit": {
+ "description": "The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. If unspecified, the default propagated connection limit is 250.",
+ "format": "uint32",
+ "type": "integer"
+ },
"pscServiceAttachmentId": {
"$ref": "Uint128",
"description": "[Output Only] An 128-bit global unique ID of the PSC service attachment."
@@ -74192,6 +73149,11 @@
"description": "The url of a connected endpoint.",
"type": "string"
},
+ "propagatedConnectionCount": {
+ "description": "The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to.",
+ "format": "uint32",
+ "type": "integer"
+ },
"pscConnectionId": {
"description": "The PSC connection id of the connected endpoint.",
"format": "uint64",
@@ -78057,7 +77019,7 @@
"type": "string"
},
"internalIpv6Prefix": {
- "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.",
+ "description": "The internal IPv6 address range that is owned by this subnetwork.",
"type": "string"
},
"ipCidrRange": {
@@ -78173,11 +77135,13 @@
"description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"New VMs in this subnet can have both IPv4 and IPv6 addresses.",
- "New VMs in this subnet will only be assigned IPv4 addresses."
+ "New VMs in this subnet will only be assigned IPv4 addresses.",
+ "New VMs in this subnet will only be assigned IPv6 addresses."
],
"type": "string"
},
@@ -82602,7 +81566,7 @@
"type": "object"
},
"UrlMap": {
- "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.",
+ "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL, EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.",
"id": "UrlMap",
"properties": {
"creationTimestamp": {
@@ -83352,11 +82316,13 @@
"description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"New VMs in this subnet can have both IPv4 and IPv6 addresses.",
- "New VMs in this subnet will only be assigned IPv4 addresses."
+ "New VMs in this subnet will only be assigned IPv4 addresses.",
+ "New VMs in this subnet will only be assigned IPv6 addresses."
],
"type": "string"
},
diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go
index 9ea1149d2bd6f..245c3c6871457 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -173,7 +173,6 @@ func New(client *http.Client) (*Service, error) {
s.FirewallPolicies = NewFirewallPoliciesService(s)
s.Firewalls = NewFirewallsService(s)
s.ForwardingRules = NewForwardingRulesService(s)
- s.FutureReservations = NewFutureReservationsService(s)
s.GlobalAddresses = NewGlobalAddressesService(s)
s.GlobalForwardingRules = NewGlobalForwardingRulesService(s)
s.GlobalNetworkEndpointGroups = NewGlobalNetworkEndpointGroupsService(s)
@@ -293,8 +292,6 @@ type Service struct {
ForwardingRules *ForwardingRulesService
- FutureReservations *FutureReservationsService
-
GlobalAddresses *GlobalAddressesService
GlobalForwardingRules *GlobalForwardingRulesService
@@ -580,15 +577,6 @@ type ForwardingRulesService struct {
s *Service
}
-func NewFutureReservationsService(s *Service) *FutureReservationsService {
- rs := &FutureReservationsService{s: s}
- return rs
-}
-
-type FutureReservationsService struct {
- s *Service
-}
-
func NewGlobalAddressesService(s *Service) *GlobalAddressesService {
rs := &GlobalAddressesService{s: s}
return rs
@@ -2683,6 +2671,10 @@ type AdvancedMachineFeatures struct {
// simultaneous multithreading (SMT) set this to 1. If unset, the maximum
// number of threads supported per core by the underlying processor is assumed.
ThreadsPerCore int64 `json:"threadsPerCore,omitempty"`
+ // TurboMode: Turbo frequency mode to use for the instance. Supported modes
+ // include: * ALL_CORE_MAX Using empty string or not setting this field will
+ // use the platform-specific default turbo mode.
+ TurboMode string `json:"turboMode,omitempty"`
// VisibleCoreCount: The number of physical cores to expose to an instance.
// Multiply by the number of threads per core to compute the total number of
// virtual CPUs to expose to the instance. If unset, the number of cores is
@@ -3268,8 +3260,6 @@ func (s AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) {
type AuditConfig struct {
// AuditLogConfigs: The configuration for logging of each type of permission.
AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"`
- // ExemptedMembers: This is deprecated and has no effect. Do not use.
- ExemptedMembers []string `json:"exemptedMembers,omitempty"`
// Service: Specifies a service that will be enabled for audit logging. For
// example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices`
// is a special value that covers all services.
@@ -3301,8 +3291,6 @@ type AuditLogConfig struct {
// ExemptedMembers: Specifies the identities that do not cause logging for this
// type of permission. Follows the same format of Binding.members.
ExemptedMembers []string `json:"exemptedMembers,omitempty"`
- // IgnoreChildExemptions: This is deprecated and has no effect. Do not use.
- IgnoreChildExemptions bool `json:"ignoreChildExemptions,omitempty"`
// LogType: The log type that this config enables.
//
// Possible values:
@@ -4488,6 +4476,8 @@ type BackendBucket struct {
Name string `json:"name,omitempty"`
// SelfLink: [Output Only] Server-defined URL for the resource.
SelfLink string `json:"selfLink,omitempty"`
+ // UsedBy: [Output Only] List of resources referencing that backend bucket.
+ UsedBy []*BackendBucketUsedBy `json:"usedBy,omitempty"`
// ServerResponse contains the HTTP response code and headers from the server.
googleapi.ServerResponse `json:"-"`
@@ -4877,6 +4867,28 @@ func (s BackendBucketListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+type BackendBucketUsedBy struct {
+ // Reference: [Output Only] Server-defined URL for UrlMaps referencing that
+ // BackendBucket.
+ Reference string `json:"reference,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Reference") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Reference") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s BackendBucketUsedBy) MarshalJSON() ([]byte, error) {
+ type NoMethod BackendBucketUsedBy
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// BackendService: Represents a Backend Service resource. A backend service
// defines how Google Cloud load balancers distribute traffic. The backend
// service configuration contains a set of values, such as the protocol used to
@@ -4980,6 +4992,39 @@ type BackendService struct {
// Id: [Output Only] The unique identifier for the resource. This identifier is
// defined by the server.
Id uint64 `json:"id,omitempty,string"`
+ // IpAddressSelectionPolicy: Specifies a preference for traffic sent from the
+ // proxy to the backend (or from the client to the backend for proxyless gRPC).
+ // The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends
+ // of the backend service (Instance Group, Managed Instance Group, Network
+ // Endpoint Group), regardless of traffic from the client to the proxy. Only
+ // IPv4 health checks are used to check the health of the backends. This is the
+ // default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's
+ // IPv6 address over its IPv4 address (provided there is a healthy IPv6
+ // address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend
+ // service (Instance Group, Managed Instance Group, Network Endpoint Group),
+ // regardless of traffic from the client to the proxy. Only IPv6 health checks
+ // are used to check the health of the backends. This field is applicable to
+ // either: - Advanced global external Application Load Balancer (load balancing
+ // scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, -
+ // Internal proxy Network Load Balancer (load balancing scheme
+ // INTERNAL_MANAGED), - Regional internal Application Load Balancer (load
+ // balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies
+ // and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED).
+ //
+ // Possible values:
+ // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend
+ // Service (Instance Group, Managed Instance Group, Network Endpoint Group)
+ // regardless of traffic from the client to the proxy. Only IPv4 health-checks
+ // are used to check the health of the backends. This is the default setting.
+ // "IPV6_ONLY" - Only send IPv6 traffic to the backends of the Backend
+ // Service (Instance Group, Managed Instance Group, Network Endpoint Group)
+ // regardless of traffic from the client to the proxy. Only IPv6 health-checks
+ // are used to check the health of the backends.
+ // "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED" - Unspecified IP address
+ // selection policy.
+ // "PREFER_IPV6" - Prioritize the connection to the endpoints IPv6 address
+ // over its IPv4 address (provided there is a healthy IPv6 address).
+ IpAddressSelectionPolicy string `json:"ipAddressSelectionPolicy,omitempty"`
// Kind: [Output Only] Type of resource. Always compute#backendService for
// backend services.
Kind string `json:"kind,omitempty"`
@@ -6791,8 +6836,8 @@ type BulkInsertInstanceResource struct {
// InstanceProperties: The instance properties defining the VM instances to be
// created. Required if sourceInstanceTemplate is not provided.
InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"`
- // LocationPolicy: Policy for chosing target zone. For more information, see
- // Create VMs in bulk .
+ // LocationPolicy: Policy for choosing target zone. For more information, see
+ // Create VMs in bulk.
LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"`
// MinCount: The minimum number of instances to create. If no min_count is
// specified then count is used as the default value. If min_count instances
@@ -7046,6 +7091,10 @@ type Commitment struct {
Category string `json:"category,omitempty"`
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format.
CreationTimestamp string `json:"creationTimestamp,omitempty"`
+ // CustomEndTimestamp: [Input Only] Optional, specifies the CUD end time
+ // requested by the customer in RFC3339 text format. Needed when the customer
+ // wants CUD's end date is later than the start date + term duration.
+ CustomEndTimestamp string `json:"customEndTimestamp,omitempty"`
// Description: An optional description of this resource. Provide this property
// when you create the resource.
Description string `json:"description,omitempty"`
@@ -7091,6 +7140,8 @@ type Commitment struct {
Region string `json:"region,omitempty"`
// Reservations: List of create-on-create reservations for this commitment.
Reservations []*Reservation `json:"reservations,omitempty"`
+ // ResourceStatus: [Output Only] Status information for Commitment resource.
+ ResourceStatus *CommitmentResourceStatus `json:"resourceStatus,omitempty"`
// Resources: A list of commitment amounts for particular resources. Note that
// VCPU and MEMORY resource commitments must occur together.
Resources []*ResourceCommitment `json:"resources,omitempty"`
@@ -7474,6 +7525,33 @@ func (s CommitmentListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// CommitmentResourceStatus: [Output Only] Contains output only fields.
+type CommitmentResourceStatus struct {
+ // CustomTermEligibilityEndTimestamp: [Output Only] Indicates the end time of
+ // customer's eligibility to send custom term requests in RFC3339 text format.
+ // Term extension requests that (not the end time in the request) after this
+ // time will be rejected.
+ CustomTermEligibilityEndTimestamp string `json:"customTermEligibilityEndTimestamp,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "CustomTermEligibilityEndTimestamp") to unconditionally include in API
+ // requests. By default, fields with empty or default values are omitted from
+ // API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g.
+ // "CustomTermEligibilityEndTimestamp") to include in API requests with the
+ // JSON null value. By default, fields with empty values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
+ // more details.
+ NullFields []string `json:"-"`
+}
+
+func (s CommitmentResourceStatus) MarshalJSON() ([]byte, error) {
+ type NoMethod CommitmentResourceStatus
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
type CommitmentsScopedList struct {
// Commitments: [Output Only] A list of commitments contained in this scope.
Commitments []*Commitment `json:"commitments,omitempty"`
@@ -7678,6 +7756,7 @@ type ConfidentialInstanceConfig struct {
// this value.
// "SEV" - AMD Secure Encrypted Virtualization.
// "SEV_SNP" - AMD Secure Encrypted Virtualization - Secure Nested Paging.
+ // "TDX" - Intel Trust Domain eXtension.
ConfidentialInstanceType string `json:"confidentialInstanceType,omitempty"`
// EnableConfidentialCompute: Defines whether the instance should have
// confidential compute enabled.
@@ -12129,832 +12208,6 @@ func (s ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
-type FutureReservation struct {
- // AutoCreatedReservationsDeleteTime: Future timestamp when the FR auto-created
- // reservations will be deleted by Compute Engine. Format of this field must be
- // a valid href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339 value.
- AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"`
- // AutoCreatedReservationsDuration: Specifies the duration of auto-created
- // reservations. It represents relative time to future reservation start_time
- // when auto-created reservations will be automatically deleted by Compute
- // Engine. Duration time unit is represented as a count of seconds and
- // fractions of seconds at nanosecond resolution.
- AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"`
- // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling
- // automatic deletion for auto-created reservation. If set to true,
- // auto-created reservations will be deleted at Future Reservation's end time
- // (default) or at user's defined timestamp if any of the
- // [auto_created_reservations_delete_time, auto_created_reservations_duration]
- // values is specified. For keeping auto-created reservation indefinitely, this
- // value should be set to false.
- AutoDeleteAutoCreatedReservations bool `json:"autoDeleteAutoCreatedReservations,omitempty"`
- // CreationTimestamp: [Output Only] The creation timestamp for this future
- // reservation in RFC3339 text format.
- CreationTimestamp string `json:"creationTimestamp,omitempty"`
- // Description: An optional description of this resource. Provide this property
- // when you create the future reservation.
- Description string `json:"description,omitempty"`
- // Id: [Output Only] A unique identifier for this future reservation. The
- // server defines this identifier.
- Id uint64 `json:"id,omitempty,string"`
- // Kind: [Output Only] Type of the resource. Always compute#futureReservation
- // for future reservations.
- Kind string `json:"kind,omitempty"`
- // Name: The name of the resource, provided by the client when initially
- // creating the resource. The resource name must be 1-63 characters long, and
- // comply with RFC1035. Specifically, the name must be 1-63 characters long and
- // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
- // first character must be a lowercase letter, and all following characters
- // must be a dash, lowercase letter, or digit, except the last character, which
- // cannot be a dash.
- Name string `json:"name,omitempty"`
- // NamePrefix: Name prefix for the reservations to be created at the time of
- // delivery. The name prefix must comply with RFC1035. Maximum allowed length
- // for name prefix is 20. Automatically created reservations name format will
- // be -date-####.
- NamePrefix string `json:"namePrefix,omitempty"`
- // PlanningStatus: Planning state before being submitted for evaluation
- //
- // Possible values:
- // "DRAFT" - Future Reservation is being drafted.
- // "PLANNING_STATUS_UNSPECIFIED"
- // "SUBMITTED" - Future Reservation has been submitted for evaluation by GCP.
- PlanningStatus string `json:"planningStatus,omitempty"`
- // SelfLink: [Output Only] Server-defined fully-qualified URL for this
- // resource.
- SelfLink string `json:"selfLink,omitempty"`
- // SelfLinkWithId: [Output Only] Server-defined URL for this resource with the
- // resource id.
- SelfLinkWithId string `json:"selfLinkWithId,omitempty"`
- // ShareSettings: List of Projects/Folders to share with.
- ShareSettings *ShareSettings `json:"shareSettings,omitempty"`
- // SpecificSkuProperties: Future Reservation configuration to indicate instance
- // properties and total count.
- SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // Status: [Output only] Status of the Future Reservation
- Status *FutureReservationStatus `json:"status,omitempty"`
- // TimeWindow: Time window for this Future Reservation.
- TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"`
- // Zone: [Output Only] URL of the Zone where this future reservation resides.
- Zone string `json:"zone,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g.
- // "AutoCreatedReservationsDeleteTime") to unconditionally include in API
- // requests. By default, fields with empty or default values are omitted from
- // API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g.
- // "AutoCreatedReservationsDeleteTime") to include in API requests with the
- // JSON null value. By default, fields with empty values are omitted from API
- // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
- // more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservation) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservation
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationSpecificSKUProperties struct {
- // InstanceProperties: Properties of the SKU instances being reserved.
- InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"`
- // SourceInstanceTemplate: The instance template that will be used to populate
- // the ReservedInstanceProperties of the future reservation
- SourceInstanceTemplate string `json:"sourceInstanceTemplate,omitempty"`
- // TotalCount: Total number of instances for which capacity assurance is
- // requested at a future time period.
- TotalCount int64 `json:"totalCount,omitempty,string"`
- // ForceSendFields is a list of field names (e.g. "InstanceProperties") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "InstanceProperties") to include
- // in API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationSpecificSKUProperties) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationSpecificSKUProperties
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatus: [Output only] Represents status related to the
-// future reservation.
-type FutureReservationStatus struct {
- // AmendmentStatus: [Output Only] The current status of the requested
- // amendment.
- //
- // Possible values:
- // "AMENDMENT_APPROVED" - The requested amendment to the Future Resevation
- // has been approved and applied by GCP.
- // "AMENDMENT_DECLINED" - The requested amendment to the Future Reservation
- // has been declined by GCP and the original state was restored.
- // "AMENDMENT_IN_REVIEW" - The requested amendment to the Future Reservation
- // is currently being reviewd by GCP.
- // "AMENDMENT_STATUS_UNSPECIFIED"
- AmendmentStatus string `json:"amendmentStatus,omitempty"`
- // AutoCreatedReservations: Fully qualified urls of the automatically created
- // reservations at start_time.
- AutoCreatedReservations []string `json:"autoCreatedReservations,omitempty"`
- // ExistingMatchingUsageInfo: [Output Only] Represents the existing matching
- // usage for the future reservation.
- ExistingMatchingUsageInfo *FutureReservationStatusExistingMatchingUsageInfo `json:"existingMatchingUsageInfo,omitempty"`
- // FulfilledCount: This count indicates the fulfilled capacity so far. This is
- // set during "PROVISIONING" state. This count also includes capacity delivered
- // as part of existing matching reservations.
- FulfilledCount int64 `json:"fulfilledCount,omitempty,string"`
- // LastKnownGoodState: [Output Only] This field represents the future
- // reservation before an amendment was requested. If the amendment is declined,
- // the Future Reservation will be reverted to the last known good state. The
- // last known good state is not set when updating a future reservation whose
- // Procurement Status is DRAFTING.
- LastKnownGoodState *FutureReservationStatusLastKnownGoodState `json:"lastKnownGoodState,omitempty"`
- // LockTime: Time when Future Reservation would become LOCKED, after which no
- // modifications to Future Reservation will be allowed. Applicable only after
- // the Future Reservation is in the APPROVED state. The lock_time is an RFC3339
- // string. The procurement_status will transition to PROCURING state at this
- // time.
- LockTime string `json:"lockTime,omitempty"`
- // ProcurementStatus: Current state of this Future Reservation
- //
- // Possible values:
- // "APPROVED" - Future reservation is approved by GCP.
- // "CANCELLED" - Future reservation is cancelled by the customer.
- // "COMMITTED" - Future reservation is committed by the customer.
- // "DECLINED" - Future reservation is rejected by GCP.
- // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions to
- // PENDING_APPROVAL upon user submitting FR.
- // "FAILED" - Future reservation failed. No additional reservations were
- // provided.
- // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially fulfilled.
- // Additional reservations were provided but did not reach total_count reserved
- // instance slots.
- // "FULFILLED" - Future reservation is fulfilled completely.
- // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future Reservation has
- // been requested. If the Amendment is declined, the Future Reservation will be
- // restored to the last known good state.
- // "PENDING_APPROVAL" - Future reservation is pending approval by GCP.
- // "PROCUREMENT_STATUS_UNSPECIFIED"
- // "PROCURING" - Future reservation is being procured by GCP. Beyond this
- // point, Future reservation is locked and no further modifications are
- // allowed.
- // "PROVISIONING" - Future reservation capacity is being provisioned. This
- // state will be entered after start_time, while reservations are being created
- // to provide total_count reserved instance slots. This state will not persist
- // past start_time + 24h.
- ProcurementStatus string `json:"procurementStatus,omitempty"`
- SpecificSkuProperties *FutureReservationStatusSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // ForceSendFields is a list of field names (e.g. "AmendmentStatus") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "AmendmentStatus") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatus) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatus
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusExistingMatchingUsageInfo: [Output Only] Represents
-// the existing matching usage for the future reservation.
-type FutureReservationStatusExistingMatchingUsageInfo struct {
- // Count: Count to represent min(FR total_count,
- // matching_reserved_capacity+matching_unreserved_instances)
- Count int64 `json:"count,omitempty,string"`
- // Timestamp: Timestamp when the matching usage was calculated
- Timestamp string `json:"timestamp,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Count") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Count") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusExistingMatchingUsageInfo) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusExistingMatchingUsageInfo
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusLastKnownGoodState: The state that the future
-// reservation will be reverted to should the amendment be declined.
-type FutureReservationStatusLastKnownGoodState struct {
- // Description: [Output Only] The description of the FutureReservation before
- // an amendment was requested.
- Description string `json:"description,omitempty"`
- // ExistingMatchingUsageInfo: [Output Only] Represents the matching usage for
- // the future reservation before an amendment was requested.
- ExistingMatchingUsageInfo *FutureReservationStatusExistingMatchingUsageInfo `json:"existingMatchingUsageInfo,omitempty"`
- FutureReservationSpecs *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs `json:"futureReservationSpecs,omitempty"`
- // LockTime: [Output Only] The lock time of the FutureReservation before an
- // amendment was requested.
- LockTime string `json:"lockTime,omitempty"`
- // NamePrefix: [Output Only] The name prefix of the Future Reservation before
- // an amendment was requested.
- NamePrefix string `json:"namePrefix,omitempty"`
- // ProcurementStatus: [Output Only] The status of the last known good state for
- // the Future Reservation.
- //
- // Possible values:
- // "APPROVED" - Future reservation is approved by GCP.
- // "CANCELLED" - Future reservation is cancelled by the customer.
- // "COMMITTED" - Future reservation is committed by the customer.
- // "DECLINED" - Future reservation is rejected by GCP.
- // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions to
- // PENDING_APPROVAL upon user submitting FR.
- // "FAILED" - Future reservation failed. No additional reservations were
- // provided.
- // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially fulfilled.
- // Additional reservations were provided but did not reach total_count reserved
- // instance slots.
- // "FULFILLED" - Future reservation is fulfilled completely.
- // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future Reservation has
- // been requested. If the Amendment is declined, the Future Reservation will be
- // restored to the last known good state.
- // "PENDING_APPROVAL" - Future reservation is pending approval by GCP.
- // "PROCUREMENT_STATUS_UNSPECIFIED"
- // "PROCURING" - Future reservation is being procured by GCP. Beyond this
- // point, Future reservation is locked and no further modifications are
- // allowed.
- // "PROVISIONING" - Future reservation capacity is being provisioned. This
- // state will be entered after start_time, while reservations are being created
- // to provide total_count reserved instance slots. This state will not persist
- // past start_time + 24h.
- ProcurementStatus string `json:"procurementStatus,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Description") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Description") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusLastKnownGoodState) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusLastKnownGoodState
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusLastKnownGoodStateFutureReservationSpecs: The
-// properties of the last known good state for the Future Reservation.
-type FutureReservationStatusLastKnownGoodStateFutureReservationSpecs struct {
- // ShareSettings: [Output Only] The previous share settings of the Future
- // Reservation.
- ShareSettings *ShareSettings `json:"shareSettings,omitempty"`
- // SpecificSkuProperties: [Output Only] The previous instance related
- // properties of the Future Reservation.
- SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // TimeWindow: [Output Only] The previous time window of the Future
- // Reservation.
- TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ShareSettings") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ShareSettings") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusLastKnownGoodStateFutureReservationSpecs) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusLastKnownGoodStateFutureReservationSpecs
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusSpecificSKUProperties: Properties to be set for the
-// Future Reservation.
-type FutureReservationStatusSpecificSKUProperties struct {
- // SourceInstanceTemplateId: ID of the instance template used to populate the
- // Future Reservation properties.
- SourceInstanceTemplateId string `json:"sourceInstanceTemplateId,omitempty"`
- // ForceSendFields is a list of field names (e.g. "SourceInstanceTemplateId")
- // to unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "SourceInstanceTemplateId") to
- // include in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusSpecificSKUProperties) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusSpecificSKUProperties
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationTimeWindow struct {
- Duration *Duration `json:"duration,omitempty"`
- EndTime string `json:"endTime,omitempty"`
- // StartTime: Start time of the Future Reservation. The start_time is an
- // RFC3339 string.
- StartTime string `json:"startTime,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Duration") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Duration") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationTimeWindow) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationTimeWindow
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationsAggregatedListResponse: Contains a list of future
-// reservations.
-type FutureReservationsAggregatedListResponse struct {
- Etag string `json:"etag,omitempty"`
- // Id: [Output Only] Unique identifier for the resource; defined by the server.
- Id string `json:"id,omitempty"`
- // Items: A list of Future reservation resources.
- Items map[string]FutureReservationsScopedList `json:"items,omitempty"`
- // Kind: [Output Only] Type of resource. Always
- // compute#futureReservationsAggregatedListResponse for future resevation
- // aggregated list response.
- Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] This token allows you to get the next page of
- // results for list requests. If the number of results is larger than
- // maxResults, use the nextPageToken as a value for the query parameter
- // pageToken in the next list request. Subsequent list requests will have their
- // own nextPageToken to continue paging through the results.
- NextPageToken string `json:"nextPageToken,omitempty"`
- // SelfLink: [Output Only] Server-defined URL for this resource.
- SelfLink string `json:"selfLink,omitempty"`
- // Unreachables: [Output Only] Unreachable resources.
- Unreachables []string `json:"unreachables,omitempty"`
- // Warning: [Output Only] Informational warning message.
- Warning *FutureReservationsAggregatedListResponseWarning `json:"warning,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Etag") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Etag") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponse) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponse
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationsAggregatedListResponseWarning: [Output Only] Informational
-// warning message.
-type FutureReservationsAggregatedListResponseWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a
- // exceedingly large number of resources
- // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to list
- // overhead quota exceed which captures the amount of resources filtered out by
- // user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
- Data []*FutureReservationsAggregatedListResponseWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponseWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponseWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsAggregatedListResponseWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponseWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponseWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsListResponse struct {
- Etag string `json:"etag,omitempty"`
- // Id: [Output Only] The unique identifier for the resource. This identifier is
- // defined by the server.
- Id string `json:"id,omitempty"`
- // Items: [Output Only] A list of future reservation resources.
- Items []*FutureReservation `json:"items,omitempty"`
- // Kind: [Output Only] Type of resource.Always
- // compute#FutureReservationsListResponse for lists of reservations
- Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] This token allows you to get the next page of
- // results for list requests. If the number of results is larger than
- // maxResults, use the nextPageToken as a value for the query parameter
- // pageToken in the next list request. Subsequent list requests will have their
- // own nextPageToken to continue paging through the results.
- NextPageToken string `json:"nextPageToken,omitempty"`
- // SelfLink: [Output Only] Server-defined URL for this resource.
- SelfLink string `json:"selfLink,omitempty"`
- // Unreachables: [Output Only] Unreachable resources.
- Unreachables []string `json:"unreachables,omitempty"`
- // Warning: [Output Only] Informational warning message.
- Warning *FutureReservationsListResponseWarning `json:"warning,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Etag") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Etag") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponse) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponse
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationsListResponseWarning: [Output Only] Informational warning
-// message.
-type FutureReservationsListResponseWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a
- // exceedingly large number of resources
- // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to list
- // overhead quota exceed which captures the amount of resources filtered out by
- // user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
- Data []*FutureReservationsListResponseWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponseWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponseWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsListResponseWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponseWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponseWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsScopedList struct {
- // FutureReservations: A list of future reservations contained in this scope.
- FutureReservations []*FutureReservation `json:"futureReservations,omitempty"`
- // Warning: Informational warning which replaces the list of future
- // reservations when the list is empty.
- Warning *FutureReservationsScopedListWarning `json:"warning,omitempty"`
- // ForceSendFields is a list of field names (e.g. "FutureReservations") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "FutureReservations") to include
- // in API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedList) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedList
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationsScopedListWarning: Informational warning which replaces
-// the list of future reservations when the list is empty.
-type FutureReservationsScopedListWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a
- // exceedingly large number of resources
- // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to list
- // overhead quota exceed which captures the amount of resources filtered out by
- // user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
- Data []*FutureReservationsScopedListWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedListWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedListWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsScopedListWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedListWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedListWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
type GRPCHealthCheck struct {
// GrpcServiceName: The gRPC service name for the health check. This field is
// optional. The value of grpc_service_name has the following meanings by
@@ -13281,6 +12534,7 @@ type GuestOsFeature struct {
// "SEV_LIVE_MIGRATABLE"
// "SEV_LIVE_MIGRATABLE_V2"
// "SEV_SNP_CAPABLE"
+ // "TDX_CAPABLE"
// "UEFI_COMPATIBLE"
// "VIRTIO_SCSI_MULTIQUEUE"
// "WINDOWS"
@@ -14425,7 +13679,14 @@ type HealthStatus struct {
// IpAddress: For target pool based Network Load Balancing, it indicates the
// forwarding rule's IP address assigned to this instance. For other types of
// load balancing, the field indicates VM internal ip.
- IpAddress string `json:"ipAddress,omitempty"`
+ IpAddress string `json:"ipAddress,omitempty"`
+ Ipv6Address string `json:"ipv6Address,omitempty"`
+ // Ipv6HealthState: Health state of the IPv6 address of the instance.
+ //
+ // Possible values:
+ // "HEALTHY"
+ // "UNHEALTHY"
+ Ipv6HealthState string `json:"ipv6HealthState,omitempty"`
// Port: The named port of the instance group, not necessarily the port that is
// health-checked.
Port int64 `json:"port,omitempty"`
@@ -21675,10 +20936,10 @@ type Interconnect struct {
AdminEnabled bool `json:"adminEnabled,omitempty"`
// AvailableFeatures: [Output only] List of features available for this
// Interconnect connection, which can take one of the following values: -
- // MACSEC If present then the Interconnect connection is provisioned on MACsec
- // capable hardware ports. If not present then the Interconnect connection is
- // provisioned on non-MACsec capable ports and MACsec isn't supported and
- // enabling MACsec fails.
+ // IF_MACSEC If present then the Interconnect connection is provisioned on
+ // MACsec capable hardware ports. If not present then the Interconnect
+ // connection is provisioned on non-MACsec capable ports and MACsec isn't
+ // supported and enabling MACsec fails.
//
// Possible values:
// "IF_MACSEC" - Media Access Control security (MACsec)
@@ -21802,7 +21063,7 @@ type Interconnect struct {
RemoteLocation string `json:"remoteLocation,omitempty"`
// RequestedFeatures: Optional. List of features requested for this
// Interconnect connection, which can take one of the following values: -
- // MACSEC If specified then the connection is created on MACsec capable
+ // IF_MACSEC If specified then the connection is created on MACsec capable
// hardware ports. If not specified, the default value is false, which
// allocates non-MACsec capable ports first if available. This parameter can be
// provided only with Interconnect INSERT. It isn't valid for Interconnect
@@ -27092,6 +26353,10 @@ func (s NetworkEdgeSecurityServicesScopedListWarningData) MarshalJSON() ([]byte,
type NetworkEndpoint struct {
// Annotations: Metadata defined as annotations on the network endpoint.
Annotations map[string]string `json:"annotations,omitempty"`
+ // ClientDestinationPort: Represents the port number to which PSC consumer
+ // sends packets. Only valid for network endpoint groups created with
+ // GCE_VM_IP_PORTMAP endpoint type.
+ ClientDestinationPort int64 `json:"clientDestinationPort,omitempty"`
// Fqdn: Optional fully qualified domain name of network endpoint. This can
// only be specified when NetworkEndpointGroup.network_endpoint_type is
// NON_GCP_FQDN_PORT.
@@ -27188,6 +26453,8 @@ type NetworkEndpointGroup struct {
// "GCE_VM_IP" - The network endpoint is represented by an IP address.
// "GCE_VM_IP_PORT" - The network endpoint is represented by IP address and
// port pair.
+ // "GCE_VM_IP_PORTMAP" - The network endpoint is represented by an IP, Port
+ // and Client Destination Port.
// "INTERNET_FQDN_PORT" - The network endpoint is represented by fully
// qualified domain name and port.
// "INTERNET_IP_PORT" - The network endpoint is represented by an internet IP
@@ -27675,6 +26942,10 @@ type NetworkEndpointGroupPscData struct {
// for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as
// an endpoint in L7 PSC-XLB.
ConsumerPscAddress string `json:"consumerPscAddress,omitempty"`
+ // ProducerPort: The psc producer port is used to connect PSC NEG with specific
+ // port on the PSC Producer side; should only be used for the
+ // PRIVATE_SERVICE_CONNECT NEG type
+ ProducerPort int64 `json:"producerPort,omitempty"`
// PscConnectionId: [Output Only] The PSC connection id of the PSC Network
// Endpoint Group Consumer.
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"`
@@ -28181,7 +27452,8 @@ type NetworkInterface struct {
//
// Possible values:
// "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 addresses.
- // "IPV4_ONLY" - The network interface will be assigned IPv4 address.
+ // "IPV4_ONLY" - The network interface will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - The network interface will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// Subnetwork: The URL of the Subnetwork resource for this instance. If the
// network resource is in legacy mode, do not specify this field. If the
@@ -28577,6 +27849,9 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct {
DisplayName string `json:"displayName,omitempty"`
// Name: [Output Only] The name of the firewall policy.
Name string `json:"name,omitempty"`
+ // Priority: [Output only] Priority of firewall policy association. Not
+ // applicable for type=HIERARCHY.
+ Priority int64 `json:"priority,omitempty"`
// Rules: The rules that apply to the network.
Rules []*FirewallPolicyRule `json:"rules,omitempty"`
// ShortName: [Output Only] The short name of the firewall policy.
@@ -28586,6 +27861,7 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct {
// Possible values:
// "HIERARCHY"
// "NETWORK"
+ // "SYSTEM"
// "UNSPECIFIED"
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
@@ -30985,7 +30261,7 @@ type Operation struct {
TargetId uint64 `json:"targetId,omitempty,string"`
// TargetLink: [Output Only] The URL of the resource that the operation
// modifies. For operations related to creating a snapshot, this points to the
- // persistent disk that the snapshot was created from.
+ // disk that the snapshot was created from.
TargetLink string `json:"targetLink,omitempty"`
// User: [Output Only] User who requested the operation, for example:
// `user@example.com` or `alice_smith_identifier
@@ -42170,6 +41446,17 @@ type ServiceAttachment struct {
// loadBalancingScheme INTERNAL* that is serving the endpoint identified by
// this service attachment.
ProducerForwardingRule string `json:"producerForwardingRule,omitempty"`
+ // PropagatedConnectionLimit: The number of consumer spokes that connected
+ // Private Service Connect endpoints can be propagated to through Network
+ // Connectivity Center. This limit lets the service producer limit how many
+ // propagated Private Service Connect connections can be established to this
+ // service attachment from a single consumer. If the connection preference of
+ // the service attachment is ACCEPT_MANUAL, the limit applies to each project
+ // or network that is listed in the consumer accept list. If the connection
+ // preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies
+ // to each project that contains a connected endpoint. If unspecified, the
+ // default propagated connection limit is 250.
+ PropagatedConnectionLimit int64 `json:"propagatedConnectionLimit,omitempty"`
// PscServiceAttachmentId: [Output Only] An 128-bit global unique ID of the PSC
// service attachment.
PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"`
@@ -42378,6 +41665,10 @@ type ServiceAttachmentConnectedEndpoint struct {
ConsumerNetwork string `json:"consumerNetwork,omitempty"`
// Endpoint: The url of a connected endpoint.
Endpoint string `json:"endpoint,omitempty"`
+ // PropagatedConnectionCount: The number of consumer Network Connectivity
+ // Center spokes that the connected Private Service Connect endpoint has
+ // propagated to.
+ PropagatedConnectionCount int64 `json:"propagatedConnectionCount,omitempty"`
// PscConnectionId: The PSC connection id of the connected endpoint.
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"`
// Status: The status of a connected endpoint to this service attachment.
@@ -46373,8 +45664,8 @@ type Subnetwork struct {
// Id: [Output Only] The unique identifier for the resource. This identifier is
// defined by the server.
Id uint64 `json:"id,omitempty,string"`
- // InternalIpv6Prefix: [Output Only] The internal IPv6 address range that is
- // assigned to this subnetwork.
+ // InternalIpv6Prefix: The internal IPv6 address range that is owned by this
+ // subnetwork.
InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"`
// IpCidrRange: The range of internal addresses that are owned by this
// subnetwork. Provide this property when you create the subnetwork. For
@@ -46488,6 +45779,7 @@ type Subnetwork struct {
// "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6
// addresses.
// "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// State: [Output Only] The state of the subnetwork, which can be one of the
// following values: READY: Subnetwork is created and ready to use DRAINING:
@@ -51431,10 +50723,10 @@ func (s UpcomingMaintenance) MarshalJSON() ([]byte, error) {
// Director, see the Traffic Director features: Routing and traffic management
// table. This resource defines mappings from hostnames and URL paths to either
// a backend service or a backend bucket. To use the global urlMaps resource,
-// the backend service must have a loadBalancingScheme of either EXTERNAL or
-// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend
-// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more
-// information, read URL Map Concepts.
+// the backend service must have a loadBalancingScheme of either EXTERNAL,
+// EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps
+// resource, the backend service must have a loadBalancingScheme of
+// INTERNAL_MANAGED. For more information, read URL Map Concepts.
type UrlMap struct {
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format.
CreationTimestamp string `json:"creationTimestamp,omitempty"`
@@ -52313,6 +51605,7 @@ type UsableSubnetwork struct {
// "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6
// addresses.
// "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// Subnetwork: Subnetwork URL.
Subnetwork string `json:"subnetwork,omitempty"`
diff --git a/vendor/google.golang.org/api/compute/v1/compute2-gen.go b/vendor/google.golang.org/api/compute/v1/compute2-gen.go
index 8b9c7f13e7445..f66a5b58a344c 100644
--- a/vendor/google.golang.org/api/compute/v1/compute2-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute2-gen.go
@@ -5413,8 +5413,8 @@ type BackendServicesListUsableCall struct {
header_ http.Header
}
-// ListUsable: Retrieves an aggregated list of all usable backend services in
-// the specified project.
+// ListUsable: Retrieves a list of all usable backend services in the specified
+// project.
//
// - project: Project ID for this request.
func (r *BackendServicesService) ListUsable(project string) *BackendServicesListUsableCall {
@@ -14168,129 +14168,145 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat
return ret, nil
}
-type FutureReservationsAggregatedListCall struct {
- s *Service
- project string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
+type GlobalAddressesDeleteCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// AggregatedList: Retrieves an aggregated list of future reservations. To
-// prevent failure, Google recommends that you set the `returnPartialSuccess`
-// parameter to `true`.
+// Delete: Deletes the specified address resource.
//
+// - address: Name of the address resource to delete.
// - project: Project ID for this request.
-func (r *FutureReservationsService) AggregatedList(project string) *FutureReservationsAggregatedListCall {
- c := &FutureReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
+ c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
+ c.address = address
return c
}
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *FutureReservationsAggregatedListCall) Filter(filter string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("filter", filter)
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall {
+ c.urlParams_.Set("requestId", requestId)
return c
}
-// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
-// whether every visible scope for each scope type (zone, region, global)
-// should be included in the response. For new resource types added after this
-// field, the flag has no effect as new resource types will always include
-// every visible scope for each scope type in response. For resource types
-// which predate this field, if this flag is omitted or false, only scopes of
-// the scope types where the resource type is expected to be found will be
-// included.
-func (c *FutureReservationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *FutureReservationsAggregatedListCall) MaxResults(maxResults int64) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+// Context sets the context to be used in this call's Do method.
+func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall {
+ c.ctx_ = ctx
return c
}
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *FutureReservationsAggregatedListCall) OrderBy(orderBy string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *GlobalAddressesDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
}
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *FutureReservationsAggregatedListCall) PageToken(pageToken string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
+func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("DELETE", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "address": c.address,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *FutureReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
+// Do executes the "compute.globalAddresses.delete" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
}
-// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
-// Shared VPC service project id or service project number for which aggregated
-// list request is invoked for subnetworks list-usable api.
-func (c *FutureReservationsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
+type GlobalAddressesGetCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Returns the specified address resource.
+//
+// - address: Name of the address resource to return.
+// - project: Project ID for this request.
+func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
+ c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.address = address
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *FutureReservationsAggregatedListCall) Fields(s ...googleapi.Field) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -14298,27 +14314,27 @@ func (c *FutureReservationsAggregatedListCall) Fields(s ...googleapi.Field) *Fut
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *FutureReservationsAggregatedListCall) IfNoneMatch(entityTag string) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsAggregatedListCall) Context(ctx context.Context) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *FutureReservationsAggregatedListCall) Header() http.Header {
+func (c *GlobalAddressesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -14326,7 +14342,7 @@ func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Resp
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/futureReservations")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -14335,17 +14351,17 @@ func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Resp
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"project": c.project,
+ "address": c.address,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.futureReservations.aggregatedList" call.
+// Do executes the "compute.globalAddresses.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservationsAggregatedListResponse.ServerResponse.Header or (if a
-// response was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*FutureReservationsAggregatedListResponse, error) {
+// *Address.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -14364,7 +14380,7 @@ func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption)
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &FutureReservationsAggregatedListResponse{
+ ret := &Address{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -14377,48 +14393,23 @@ func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *FutureReservationsAggregatedListCall) Pages(ctx context.Context, f func(*FutureReservationsAggregatedListResponse) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type FutureReservationsCancelCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type GlobalAddressesInsertCall struct {
+ s *Service
+ project string
+ address *Address
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Cancel: Cancel the specified future reservation.
+// Insert: Creates an address resource in the specified project by using the
+// data included in the request.
//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Cancel(project string, zone string, futureReservation string) *FutureReservationsCancelCall {
- c := &FutureReservationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
+ c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
+ c.address = address
return c
}
@@ -14432,7 +14423,7 @@ func (r *FutureReservationsService) Cancel(project string, zone string, futureRe
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsCancelCall) RequestId(requestId string) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -14440,32 +14431,36 @@ func (c *FutureReservationsCancelCall) RequestId(requestId string) *FutureReserv
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *FutureReservationsCancelCall) Fields(s ...googleapi.Field) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsCancelCall) Context(ctx context.Context) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *FutureReservationsCancelCall) Header() http.Header {
+func (c *GlobalAddressesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *FutureReservationsCancelCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -14473,1061 +14468,17 @@ func (c *FutureReservationsCancelCall) doRequest(alt string) (*http.Response, er
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.futureReservations.cancel" call.
+// Do executes the "compute.globalAddresses.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsCancelCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsDeleteCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified future reservation.
-//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Delete(project string, zone string, futureReservation string) *FutureReservationsDeleteCall {
- c := &FutureReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsDeleteCall) RequestId(requestId string) *FutureReservationsDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsDeleteCall) Fields(s ...googleapi.Field) *FutureReservationsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsDeleteCall) Context(ctx context.Context) *FutureReservationsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsGetCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Retrieves information about the specified future reservation.
-//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Get(project string, zone string, futureReservation string) *FutureReservationsGetCall {
- c := &FutureReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsGetCall) Fields(s ...googleapi.Field) *FutureReservationsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *FutureReservationsGetCall) IfNoneMatch(entityTag string) *FutureReservationsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsGetCall) Context(ctx context.Context) *FutureReservationsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservation.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *FutureReservationsGetCall) Do(opts ...googleapi.CallOption) (*FutureReservation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &FutureReservation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsInsertCall struct {
- s *Service
- project string
- zone string
- futurereservation *FutureReservation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new Future Reservation.
-//
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Insert(project string, zone string, futurereservation *FutureReservation) *FutureReservationsInsertCall {
- c := &FutureReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futurereservation = futurereservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsInsertCall) RequestId(requestId string) *FutureReservationsInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsInsertCall) Fields(s ...googleapi.Field) *FutureReservationsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsInsertCall) Context(ctx context.Context) *FutureReservationsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.futurereservation)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsListCall struct {
- s *Service
- project string
- zone string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: A list of all the future reservations that have been configured for
-// the specified project in specified zone.
-//
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) List(project string, zone string) *FutureReservationsListCall {
- c := &FutureReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- return c
-}
-
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *FutureReservationsListCall) Filter(filter string) *FutureReservationsListCall {
- c.urlParams_.Set("filter", filter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *FutureReservationsListCall) MaxResults(maxResults int64) *FutureReservationsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *FutureReservationsListCall) OrderBy(orderBy string) *FutureReservationsListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *FutureReservationsListCall) PageToken(pageToken string) *FutureReservationsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *FutureReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FutureReservationsListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsListCall) Fields(s ...googleapi.Field) *FutureReservationsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *FutureReservationsListCall) IfNoneMatch(entityTag string) *FutureReservationsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsListCall) Context(ctx context.Context) *FutureReservationsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.list" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservationsListResponse.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *FutureReservationsListCall) Do(opts ...googleapi.CallOption) (*FutureReservationsListResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &FutureReservationsListResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *FutureReservationsListCall) Pages(ctx context.Context, f func(*FutureReservationsListResponse) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type FutureReservationsUpdateCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- futurereservation *FutureReservation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates the specified future reservation.
-//
-// - futureReservation: Name of the reservation to update. Name should conform
-// to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Update(project string, zone string, futureReservation string, futurereservation *FutureReservation) *FutureReservationsUpdateCall {
- c := &FutureReservationsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- c.futurereservation = futurereservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsUpdateCall) RequestId(requestId string) *FutureReservationsUpdateCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// UpdateMask sets the optional parameter "updateMask": update_mask indicates
-// fields to be updated as part of this request.
-func (c *FutureReservationsUpdateCall) UpdateMask(updateMask string) *FutureReservationsUpdateCall {
- c.urlParams_.Set("updateMask", updateMask)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsUpdateCall) Fields(s ...googleapi.Field) *FutureReservationsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsUpdateCall) Context(ctx context.Context) *FutureReservationsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.futurereservation)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.update" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesDeleteCall struct {
- s *Service
- project string
- address string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified address resource.
-//
-// - address: Name of the address resource to delete.
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
- c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "address": c.address,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesGetCall struct {
- s *Service
- project string
- address string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the specified address resource.
-//
-// - address: Name of the address resource to return.
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
- c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "address": c.address,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Address.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Address{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesInsertCall struct {
- s *Service
- project string
- address *Address
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates an address resource in the specified project by using the
-// data included in the request.
-//
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
- c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -55959,28 +54910,382 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err
return ret, nil
}
-type ProjectsMoveInstanceCall struct {
- s *Service
- project string
- instancemoverequest *InstanceMoveRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type ProjectsMoveInstanceCall struct {
+ s *Service
+ project string
+ instancemoverequest *InstanceMoveRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// MoveInstance: Moves an instance and its attached persistent disks from one
+// zone to another. *Note*: Moving VMs or disks by using this method might
+// cause unexpected behavior. For more information, see the known issue
+// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_mov
+// einstance_api_or_the_causes_unexpected_behavior). [Deprecated] This method
+// is deprecated. See moving instance across zones
+// (/compute/docs/instances/moving-instance-across-zones) instead.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall {
+ c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instancemoverequest = instancemoverequest
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsMoveInstanceCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.moveInstance" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type ProjectsSetCloudArmorTierCall struct {
+ s *Service
+ project string
+ projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// SetCloudArmorTier: Sets the Cloud Armor tier of the project. To set
+// ENTERPRISE or above the billing account of the project must be subscribed to
+// Cloud Armor Enterprise. See Subscribing to Cloud Armor Enterprise for more
+// information.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest) *ProjectsSetCloudArmorTierCall {
+ c := &ProjectsSetCloudArmorTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.projectssetcloudarmortierrequest = projectssetcloudarmortierrequest
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSetCloudArmorTierCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsSetCloudArmorTierCall) Fields(s ...googleapi.Field) *ProjectsSetCloudArmorTierCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsSetCloudArmorTierCall) Context(ctx context.Context) *ProjectsSetCloudArmorTierCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsSetCloudArmorTierCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetcloudarmortierrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCloudArmorTier")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.setCloudArmorTier" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type ProjectsSetCommonInstanceMetadataCall struct {
+ s *Service
+ project string
+ metadata *Metadata
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// SetCommonInstanceMetadata: Sets metadata common to all instances within the
+// specified project using the data included in the request.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
+ c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.metadata = metadata
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.setCommonInstanceMetadata" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type ProjectsSetDefaultNetworkTierCall struct {
+ s *Service
+ project string
+ projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// MoveInstance: Moves an instance and its attached persistent disks from one
-// zone to another. *Note*: Moving VMs or disks by using this method might
-// cause unexpected behavior. For more information, see the known issue
-// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_mov
-// einstance_api_or_the_causes_unexpected_behavior). [Deprecated] This method
-// is deprecated. See moving instance across zones
-// (/compute/docs/instances/moving-instance-across-zones) instead.
+// SetDefaultNetworkTier: Sets the default network tier of the project. The
+// default network tier is used when an address/forwardingRule/instance is
+// created without specifying the network tier field.
//
// - project: Project ID for this request.
-func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall {
- c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall {
+ c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.instancemoverequest = instancemoverequest
+ c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest
return c
}
@@ -55994,7 +55299,7 @@ func (r *ProjectsService) MoveInstance(project string, instancemoverequest *Inst
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56002,36 +55307,36 @@ func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInst
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsMoveInstanceCall) Header() http.Header {
+func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) {
+func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56044,12 +55349,12 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error)
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.moveInstance" call.
+// Do executes the "compute.projects.setDefaultNetworkTier" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56081,25 +55386,24 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation,
return ret, nil
}
-type ProjectsSetCloudArmorTierCall struct {
- s *Service
- project string
- projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type ProjectsSetUsageExportBucketCall struct {
+ s *Service
+ project string
+ usageexportlocation *UsageExportLocation
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetCloudArmorTier: Sets the Cloud Armor tier of the project. To set
-// ENTERPRISE or above the billing account of the project must be subscribed to
-// Cloud Armor Enterprise. See Subscribing to Cloud Armor Enterprise for more
-// information.
+// SetUsageExportBucket: Enables the usage export feature and sets the usage
+// export bucket where reports are stored. If you provide an empty request body
+// using this method, the usage export feature will be disabled.
//
// - project: Project ID for this request.
-func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest) *ProjectsSetCloudArmorTierCall {
- c := &ProjectsSetCloudArmorTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
+ c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.projectssetcloudarmortierrequest = projectssetcloudarmortierrequest
+ c.usageexportlocation = usageexportlocation
return c
}
@@ -56113,7 +55417,7 @@ func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmo
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56121,36 +55425,36 @@ func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSet
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetCloudArmorTierCall) Fields(s ...googleapi.Field) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetCloudArmorTierCall) Context(ctx context.Context) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetCloudArmorTierCall) Header() http.Header {
+func (c *ProjectsSetUsageExportBucketCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, error) {
+func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetcloudarmortierrequest)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCloudArmorTier")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56163,12 +55467,12 @@ func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, e
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setCloudArmorTier" call.
+// Do executes the "compute.projects.setUsageExportBucket" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56200,23 +55504,24 @@ func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Opera
return ret, nil
}
-type ProjectsSetCommonInstanceMetadataCall struct {
- s *Service
- project string
- metadata *Metadata
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesAnnounceCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetCommonInstanceMetadata: Sets metadata common to all instances within the
-// specified project using the data included in the request.
+// Announce: Announces the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
- c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: The name of the public advertised prefix. It
+// should comply with RFC1035.
+func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesAnnounceCall {
+ c := &PublicAdvertisedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.metadata = metadata
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56230,7 +55535,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *PublicAdvertisedPrefixesAnnounceCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56238,36 +55543,32 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesAnnounceCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Context(ctx context.Context) *PublicAdvertisedPrefixesAnnounceCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56275,17 +55576,18 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setCommonInstanceMetadata" call.
+// Do executes the "compute.publicAdvertisedPrefixes.announce" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56317,24 +55619,24 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-type ProjectsSetDefaultNetworkTierCall struct {
- s *Service
- project string
- projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesDeleteCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetDefaultNetworkTier: Sets the default network tier of the project. The
-// default network tier is used when an address/forwardingRule/instance is
-// created without specifying the network tier field.
+// Delete: Deletes the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall {
- c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
+// delete.
+func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall {
+ c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56348,7 +55650,7 @@ func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefau
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56356,54 +55658,51 @@ func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *Project
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setDefaultNetworkTier" call.
+// Do executes the "compute.publicAdvertisedPrefixes.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56435,93 +55734,88 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type ProjectsSetUsageExportBucketCall struct {
- s *Service
- project string
- usageexportlocation *UsageExportLocation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesGetCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// SetUsageExportBucket: Enables the usage export feature and sets the usage
-// export bucket where reports are stored. If you provide an empty request body
-// using this method, the usage export feature will be disabled.
+// Get: Returns the specified PublicAdvertisedPrefix resource.
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
- c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
+// return.
+func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall {
+ c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.usageexportlocation = usageexportlocation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall {
- c.urlParams_.Set("requestId", requestId)
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
+func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall {
+func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetUsageExportBucketCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
- if err != nil {
- return nil, err
+func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
+ var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setUsageExportBucket" call.
+// Do executes the "compute.publicAdvertisedPrefixes.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56540,7 +55834,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &Operation{
+ ret := &PublicAdvertisedPrefix{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56553,24 +55847,23 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op
return ret, nil
}
-type PublicAdvertisedPrefixesAnnounceCall struct {
+type PublicAdvertisedPrefixesInsertCall struct {
s *Service
project string
- publicAdvertisedPrefix string
+ publicadvertisedprefix *PublicAdvertisedPrefix
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Announce: Announces the specified PublicAdvertisedPrefix
+// Insert: Creates a PublicAdvertisedPrefix in the specified project using the
+// parameters that are included in the request.
//
-// - project: Project ID for this request.
-// - publicAdvertisedPrefix: The name of the public advertised prefix. It
-// should comply with RFC1035.
-func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesAnnounceCall {
- c := &PublicAdvertisedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall {
+ c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.publicadvertisedprefix = publicadvertisedprefix
return c
}
@@ -56584,7 +55877,7 @@ func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdverti
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56592,32 +55885,36 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *Publ
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Context(ctx context.Context) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56625,18 +55922,17 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Resp
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.announce" call.
+// Do executes the "compute.publicAdvertisedPrefixes.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56668,90 +55964,161 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-type PublicAdvertisedPrefixesDeleteCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesListCall struct {
+ s *Service
+ project string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Delete: Deletes the specified PublicAdvertisedPrefix
+// List: Lists the PublicAdvertisedPrefixes for a project.
//
-// - project: Project ID for this request.
-// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// delete.
-func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall {
- c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall {
+ c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall {
- c.urlParams_.Set("requestId", requestId)
+// Filter sets the optional parameter "filter": A filter expression that
+// filters resources listed in the response. Most Compute resources support two
+// types of filter expressions: expressions that support regular expressions
+// and expressions that follow API improvement proposal AIP-160. These two
+// types of filter expressions cannot be mixed in one request. If you want to
+// use AIP-160, your expression must specify the field name, an operator, and
+// the value that you want to use for filtering. The value must be a string, a
+// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
+// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
+// can exclude instances named `example-instance` by specifying `name !=
+// example-instance`. The `:*` comparison can be used to test whether a key has
+// been defined. For example, to find all objects with `owner` label use: ```
+// labels.owner:* ``` You can also filter nested fields. For example, you could
+// specify `scheduling.automaticRestart = false` to include instances only if
+// they are not scheduled for automatic restarts. You can use filtering on
+// nested fields to filter based on resource labels. To filter on multiple
+// expressions, provide each separate expression within parentheses. For
+// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
+// Skylake") ``` By default, each expression is an `AND` expression. However,
+// you can include `AND` and `OR` expressions explicitly. For example: ```
+// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
+// (scheduling.automaticRestart = true) ``` If you want to use a regular
+// expression, use the `eq` (equal) or `ne` (not equal) operator against a
+// single un-parenthesized expression with or without quotes or against
+// multiple parenthesized expressions. Examples: `fieldname eq unquoted
+// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
+// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
+// value is interpreted as a regular expression using Google RE2 library
+// syntax. The literal value must match the entire field. For example, to
+// filter for instances that do not end with name "instance", you would use
+// `name ne .*instance`. You cannot combine constraints on multiple fields
+// using regular expressions.
+func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum number of
+// results per page that should be returned. If the number of available results
+// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
+// can be used to get the next page of results in subsequent list requests.
+// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
+func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// OrderBy sets the optional parameter "orderBy": Sorts list results by a
+// certain order. By default, results are returned in alphanumerical order
+// based on the resource name. You can also sort results in descending order
+// based on the creation timestamp using `orderBy="creationTimestamp desc".
+// This sorts results based on the `creationTimestamp` field in reverse
+// chronological order (newest result first). Use this to sort resources like
+// operations so that the newest operation is returned first. Currently, only
+// sorting by `name` or `creationTimestamp desc` is supported.
+func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page token to
+// use. Set `pageToken` to the `nextPageToken` returned by a previous list
+// request to get the next page of results.
+func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
+// Opt-in for partial success behavior which provides partial results in case
+// of failure. The default value is false. For example, when partial success
+// behavior is enabled, aggregatedList for a single zone scope either returns
+// all resources in the zone or no resources, with an error code.
+func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall {
+func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall {
+func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
+ req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.delete" call.
+// Do executes the "compute.publicAdvertisedPrefixes.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56770,7 +56137,7 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &Operation{
+ ret := &PublicAdvertisedPrefixList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56783,70 +56150,102 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*
return ret, nil
}
-type PublicAdvertisedPrefixesGetCall struct {
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken"))
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
+
+type PublicAdvertisedPrefixesPatchCall struct {
s *Service
project string
publicAdvertisedPrefix string
+ publicadvertisedprefix *PublicAdvertisedPrefix
urlParams_ gensupport.URLParams
- ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
-// Get: Returns the specified PublicAdvertisedPrefix resource.
+// Patch: Patches the specified Router resource with the data included in the
+// request. This method supports PATCH semantics and uses JSON merge patch
+// format and processing rules.
//
// - project: Project ID for this request.
// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// return.
-func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall {
- c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// patch.
+func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall {
+ c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.publicadvertisedprefix = publicadvertisedprefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall {
+ c.urlParams_.Set("requestId", requestId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall {
+func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall {
+func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
+func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
+ req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
@@ -56858,13 +56257,12 @@ func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response,
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.get" call.
+// Do executes the "compute.publicAdvertisedPrefixes.patch" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) {
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56883,7 +56281,7 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicAdvertisedPrefix{
+ ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56896,23 +56294,24 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub
return ret, nil
}
-type PublicAdvertisedPrefixesInsertCall struct {
+type PublicAdvertisedPrefixesWithdrawCall struct {
s *Service
project string
- publicadvertisedprefix *PublicAdvertisedPrefix
+ publicAdvertisedPrefix string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Insert: Creates a PublicAdvertisedPrefix in the specified project using the
-// parameters that are included in the request.
+// Withdraw: Withdraws the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall {
- c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: The name of the public advertised prefix. It
+// should comply with RFC1035.
+func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesWithdrawCall {
+ c := &PublicAdvertisedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicadvertisedprefix = publicadvertisedprefix
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56926,7 +56325,7 @@ func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertise
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *PublicAdvertisedPrefixesWithdrawCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56934,36 +56333,32 @@ func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *Public
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesWithdrawCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Context(ctx context.Context) *PublicAdvertisedPrefixesWithdrawCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56971,17 +56366,18 @@ func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Respon
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.insert" call.
+// Do executes the "compute.publicAdvertisedPrefixes.withdraw" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57013,7 +56409,7 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*
return ret, nil
}
-type PublicAdvertisedPrefixesListCall struct {
+type PublicDelegatedPrefixesAggregatedListCall struct {
s *Service
project string
urlParams_ gensupport.URLParams
@@ -57022,11 +56418,13 @@ type PublicAdvertisedPrefixesListCall struct {
header_ http.Header
}
-// List: Lists the PublicAdvertisedPrefixes for a project.
+// AggregatedList: Lists all PublicDelegatedPrefix resources owned by the
+// specific project across all scopes. To prevent failure, Google recommends
+// that you set the `returnPartialSuccess` parameter to `true`.
//
-// - project: Project ID for this request.
-func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall {
- c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Name of the project scoping this request.
+func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall {
+ c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
return c
}
@@ -57063,17 +56461,30 @@ func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertised
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("filter", filter)
return c
}
+// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
+// whether every visible scope for each scope type (zone, region, global)
+// should be included in the response. For new resource types added after this
+// field, the flag has no effect as new resource types will always include
+// every visible scope for each scope type in response. For resource types
+// which predate this field, if this flag is omitted or false, only scopes of
+// the scope types where the resource type is expected to be found will be
+// included.
+func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall {
+ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
+ return c
+}
+
// MaxResults sets the optional parameter "maxResults": The maximum number of
// results per page that should be returned. If the number of available results
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -57086,7 +56497,7 @@ func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicA
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -57094,7 +56505,7 @@ func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvert
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -57104,15 +56515,23 @@ func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAd
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
+// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
+// Shared VPC service project id or service project number for which aggregated
+// list request is invoked for subnetworks list-usable api.
+func (c *PublicDelegatedPrefixesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PublicDelegatedPrefixesAggregatedListCall {
+ c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
+ return c
+}
+
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57120,54 +56539,195 @@ func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicA
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &PublicDelegatedPrefixAggregatedList{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken"))
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
+
+type PublicDelegatedPrefixesAnnounceCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Announce: Announces the specified PublicDelegatedPrefix in the given region.
+//
+// - project: Project ID for this request.
+// - publicDelegatedPrefix: The name of the public delegated prefix. It should
+// comply with RFC1035.
+// - region: The name of the region where the public delegated prefix is
+// located. It should comply with RFC1035.
+func (r *PublicDelegatedPrefixesService) Announce(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesAnnounceCall {
+ c := &PublicDelegatedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *PublicDelegatedPrefixesAnnounceCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAnnounceCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesAnnounceCall) Context(ctx context.Context) *PublicDelegatedPrefixesAnnounceCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesListCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesAnnounceCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
+ req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.list" call.
+// Do executes the "compute.publicDelegatedPrefixes.announce" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) {
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57186,7 +56746,7 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicAdvertisedPrefixList{
+ ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -57199,49 +56759,27 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu
return ret, nil
}
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type PublicAdvertisedPrefixesPatchCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- publicadvertisedprefix *PublicAdvertisedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicDelegatedPrefixesDeleteCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Patch: Patches the specified Router resource with the data included in the
-// request. This method supports PATCH semantics and uses JSON merge patch
-// format and processing rules.
+// Delete: Deletes the specified PublicDelegatedPrefix in the given region.
//
// - project: Project ID for this request.
-// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// patch.
-func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall {
- c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// delete.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall {
+ c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
- c.publicadvertisedprefix = publicadvertisedprefix
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
return c
}
@@ -57255,7 +56793,7 @@ func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertised
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57263,55 +56801,52 @@ func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicA
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
+ req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.patch" call.
+// Do executes the "compute.publicDelegatedPrefixes.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57343,24 +56878,144 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicAdvertisedPrefixesWithdrawCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicDelegatedPrefixesGetCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Withdraw: Withdraws the specified PublicAdvertisedPrefix
+// Get: Returns the specified PublicDelegatedPrefix resource in the given
+// region.
//
// - project: Project ID for this request.
-// - publicAdvertisedPrefix: The name of the public advertised prefix. It
-// should comply with RFC1035.
-func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesWithdrawCall {
- c := &PublicAdvertisedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// return.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall {
+ c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.get" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &PublicDelegatedPrefix{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type PublicDelegatedPrefixesInsertCall struct {
+ s *Service
+ project string
+ region string
+ publicdelegatedprefix *PublicDelegatedPrefix
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Insert: Creates a PublicDelegatedPrefix in the specified project in the
+// given region using the parameters that are included in the request.
+//
+// - project: Project ID for this request.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall {
+ c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicdelegatedprefix = publicdelegatedprefix
return c
}
@@ -57374,7 +57029,7 @@ func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdverti
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57382,32 +57037,36 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *Publ
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Context(ctx context.Context) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -57415,18 +57074,18 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Resp
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.withdraw" call.
+// Do executes the "compute.publicDelegatedPrefixes.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57458,23 +57117,24 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-type PublicDelegatedPrefixesAggregatedListCall struct {
+type PublicDelegatedPrefixesListCall struct {
s *Service
project string
+ region string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
-// AggregatedList: Lists all PublicDelegatedPrefix resources owned by the
-// specific project across all scopes. To prevent failure, Google recommends
-// that you set the `returnPartialSuccess` parameter to `true`.
+// List: Lists the PublicDelegatedPrefixes for a project in the given region.
//
-// - project: Name of the project scoping this request.
-func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall {
- c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall {
+ c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
+ c.region = region
return c
}
@@ -57510,30 +57170,17 @@ func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicD
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("filter", filter)
return c
}
-// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
-// whether every visible scope for each scope type (zone, region, global)
-// should be included in the response. For new resource types added after this
-// field, the flag has no effect as new resource types will always include
-// every visible scope for each scope type in response. For resource types
-// which predate this field, if this flag is omitted or false, only scopes of
-// the scope types where the resource type is expected to be found will be
-// included.
-func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall {
- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
- return c
-}
-
// MaxResults sets the optional parameter "maxResults": The maximum number of
// results per page that should be returned. If the number of available results
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -57546,7 +57193,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64)
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -57554,7 +57201,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *Pub
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -57564,23 +57211,15 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string)
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
-// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
-// Shared VPC service project id or service project number for which aggregated
-// list request is invoked for subnetworks list-usable api.
-func (c *PublicDelegatedPrefixesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PublicDelegatedPrefixesAggregatedListCall {
- c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
- return c
-}
-
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57588,27 +57227,27 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field)
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -57616,7 +57255,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -57625,17 +57264,18 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call.
+// Do executes the "compute.publicDelegatedPrefixes.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
+// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was because
// http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) {
+func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57654,7 +57294,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefixAggregatedList{
+ ret := &PublicDelegatedPrefixList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -57670,7 +57310,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error {
+func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken"))
for {
@@ -57688,25 +57328,152 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f
}
}
-type PublicDelegatedPrefixesAnnounceCall struct {
+type PublicDelegatedPrefixesPatchCall struct {
s *Service
project string
region string
publicDelegatedPrefix string
+ publicdelegatedprefix *PublicDelegatedPrefix
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Announce: Announces the specified PublicDelegatedPrefix in the given region.
+// Patch: Patches the specified PublicDelegatedPrefix resource with the data
+// included in the request. This method supports PATCH semantics and uses JSON
+// merge patch format and processing rules.
+//
+// - project: Project ID for this request.
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// patch.
+// - region: Name of the region for this request.
+func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall {
+ c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.publicdelegatedprefix = publicdelegatedprefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("PATCH", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.patch" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type PublicDelegatedPrefixesWithdrawCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Withdraw: Withdraws the specified PublicDelegatedPrefix in the given region.
//
// - project: Project ID for this request.
// - publicDelegatedPrefix: The name of the public delegated prefix. It should
// comply with RFC1035.
// - region: The name of the region where the public delegated prefix is
// located. It should comply with RFC1035.
-func (r *PublicDelegatedPrefixesService) Announce(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesAnnounceCall {
- c := &PublicDelegatedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesWithdrawCall {
+ c := &PublicDelegatedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
c.publicDelegatedPrefix = publicDelegatedPrefix
@@ -57723,7 +57490,7 @@ func (r *PublicDelegatedPrefixesService) Announce(project string, region string,
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *PublicDelegatedPrefixesWithdrawCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57731,32 +57498,32 @@ func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *Publi
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesWithdrawCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesAnnounceCall) Context(ctx context.Context) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) Context(ctx context.Context) *PublicDelegatedPrefixesWithdrawCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesAnnounceCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesWithdrawCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -57771,12 +57538,12 @@ func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Respo
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.announce" call.
+// Do executes the "compute.publicDelegatedPrefixes.withdraw" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57808,27 +57575,26 @@ func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (
return ret, nil
}
-type PublicDelegatedPrefixesDeleteCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersDeleteCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Delete: Deletes the specified PublicDelegatedPrefix in the given region.
+// Delete: Deletes the specified autoscaler.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// delete.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall {
- c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - autoscaler: Name of the autoscaler to delete.
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall {
+ c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
return c
}
@@ -57842,7 +57608,7 @@ func (r *PublicDelegatedPrefixesService) Delete(project string, region string, p
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57850,32 +57616,32 @@ func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicD
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header {
+func (c *RegionAutoscalersDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
@@ -57883,19 +57649,19 @@ func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Respons
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "autoscaler": c.autoscaler,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.delete" call.
+// Do executes the "compute.regionAutoscalers.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57927,36 +57693,34 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicDelegatedPrefixesGetCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersGetCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Get: Returns the specified PublicDelegatedPrefix resource in the given
-// region.
+// Get: Returns the specified autoscaler.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// return.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall {
- c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - autoscaler: Name of the autoscaler to return.
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall {
+ c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57964,27 +57728,27 @@ func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDel
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesGetCall) Header() http.Header {
+func (c *RegionAutoscalersGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -57992,7 +57756,7 @@ func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response,
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -58000,20 +57764,19 @@ func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response,
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "autoscaler": c.autoscaler,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.get" call.
+// Do executes the "compute.regionAutoscalers.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) {
+// *Autoscaler.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58032,7 +57795,7 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefix{
+ ret := &Autoscaler{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -58045,26 +57808,26 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ
return ret, nil
}
-type PublicDelegatedPrefixesInsertCall struct {
- s *Service
- project string
- region string
- publicdelegatedprefix *PublicDelegatedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersInsertCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Insert: Creates a PublicDelegatedPrefix in the specified project in the
-// given region using the parameters that are included in the request.
+// Insert: Creates an autoscaler in the specified project using the data
+// included in the request.
//
// - project: Project ID for this request.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall {
- c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall {
+ c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicdelegatedprefix = publicdelegatedprefix
+ c.autoscaler = autoscaler
return c
}
@@ -58078,7 +57841,7 @@ func (r *PublicDelegatedPrefixesService) Insert(project string, region string, p
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -58086,36 +57849,36 @@ func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicD
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header {
+func (c *RegionAutoscalersInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -58129,12 +57892,12 @@ func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Respons
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.insert" call.
+// Do executes the "compute.regionAutoscalers.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58166,7 +57929,7 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicDelegatedPrefixesListCall struct {
+type RegionAutoscalersListCall struct {
s *Service
project string
region string
@@ -58176,12 +57939,12 @@ type PublicDelegatedPrefixesListCall struct {
header_ http.Header
}
-// List: Lists the PublicDelegatedPrefixes for a project in the given region.
+// List: Retrieves a list of autoscalers contained within the specified region.
//
// - project: Project ID for this request.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall {
- c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall {
+ c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
return c
@@ -58219,7 +57982,7 @@ func (r *PublicDelegatedPrefixesService) List(project string, region string) *Pu
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall {
c.urlParams_.Set("filter", filter)
return c
}
@@ -58229,7 +57992,7 @@ func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegated
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -58242,7 +58005,7 @@ func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDe
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -58250,7 +58013,7 @@ func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegat
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -58260,7 +58023,7 @@ func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDel
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
@@ -58268,7 +58031,7 @@ func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSucc
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -58276,27 +58039,27 @@ func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDe
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesListCall) Header() http.Header {
+func (c *RegionAutoscalersListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -58304,7 +58067,7 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response,
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -58318,13 +58081,13 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response,
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.list" call.
+// Do executes the "compute.regionAutoscalers.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) {
+// *RegionAutoscalerList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58343,7 +58106,7 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefixList{
+ ret := &RegionAutoscalerList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -58359,7 +58122,7 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
-func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error {
+func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken"))
for {
@@ -58377,31 +58140,34 @@ func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*Pub
}
}
-type PublicDelegatedPrefixesPatchCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- publicdelegatedprefix *PublicDelegatedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersPatchCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Patch: Patches the specified PublicDelegatedPrefix resource with the data
-// included in the request. This method supports PATCH semantics and uses JSON
-// merge patch format and processing rules.
+// Patch: Updates an autoscaler in the specified project using the data
+// included in the request. This method supports PATCH semantics and uses the
+// JSON merge patch format and processing rules.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// patch.
-// - region: Name of the region for this request.
-func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall {
- c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall {
+ c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
- c.publicdelegatedprefix = publicdelegatedprefix
+ c.autoscaler = autoscaler
+ return c
+}
+
+// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
+// to patch.
+func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall {
+ c.urlParams_.Set("autoscaler", autoscaler)
return c
}
@@ -58415,7 +58181,7 @@ func (r *PublicDelegatedPrefixesService) Patch(project string, region string, pu
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -58423,36 +58189,36 @@ func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDe
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header {
+func (c *RegionAutoscalersPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
@@ -58460,19 +58226,18 @@ func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.patch" call.
+// Do executes the "compute.regionAutoscalers.patch" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58504,28 +58269,33 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op
return ret, nil
}
-type PublicDelegatedPrefixesWithdrawCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersUpdateCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Withdraw: Withdraws the specified PublicDelegatedPrefix in the given region.
+// Update: Updates an autoscaler in the specified project using the data
+// included in the request.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: The name of the public delegated prefix. It should
-// comply with RFC1035.
-// - region: The name of the region where the public delegated prefix is
-// located. It should comply with RFC1035.
-func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesWithdrawCall {
- c := &PublicDelegatedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall {
+ c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
+ return c
+}
+
+// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
+// to update.
+func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall {
+ c.urlParams_.Set("autoscaler", autoscaler)
return c
}
@@ -58539,7 +58309,7 @@ func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string,
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -58547,52 +58317,55 @@ func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *Publi
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesWithdrawCall) Context(ctx context.Context) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesWithdrawCall) Header() http.Header {
+func (c *RegionAutoscalersUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("PUT", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.withdraw" call.
+// Do executes the "compute.regionAutoscalers.update" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
diff --git a/vendor/google.golang.org/api/compute/v1/compute3-gen.go b/vendor/google.golang.org/api/compute/v1/compute3-gen.go
index 71248bab0ccb5..1e08b6980c5aa 100644
--- a/vendor/google.golang.org/api/compute/v1/compute3-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute3-gen.go
@@ -16,828 +16,6 @@ import (
gensupport "google.golang.org/api/internal/gensupport"
)
-type RegionAutoscalersDeleteCall struct {
- s *Service
- project string
- region string
- autoscaler string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified autoscaler.
-//
-// - autoscaler: Name of the autoscaler to delete.
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall {
- c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "autoscaler": c.autoscaler,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersGetCall struct {
- s *Service
- project string
- region string
- autoscaler string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the specified autoscaler.
-//
-// - autoscaler: Name of the autoscaler to return.
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall {
- c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "autoscaler": c.autoscaler,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Autoscaler.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Autoscaler{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersInsertCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates an autoscaler in the specified project using the data
-// included in the request.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall {
- c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersListCall struct {
- s *Service
- project string
- region string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of autoscalers contained within the specified region.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall {
- c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- return c
-}
-
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall {
- c.urlParams_.Set("filter", filter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.list" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *RegionAutoscalerList.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &RegionAutoscalerList{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type RegionAutoscalersPatchCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Updates an autoscaler in the specified project using the data
-// included in the request. This method supports PATCH semantics and uses the
-// JSON merge patch format and processing rules.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall {
- c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
-// to patch.
-func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("autoscaler", autoscaler)
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.patch" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersUpdateCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates an autoscaler in the specified project using the data
-// included in the request.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall {
- c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
-// to update.
-func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("autoscaler", autoscaler)
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PUT", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.update" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
type RegionBackendServicesDeleteCall struct {
s *Service
project string
@@ -1650,8 +828,8 @@ type RegionBackendServicesListUsableCall struct {
header_ http.Header
}
-// ListUsable: Retrieves an aggregated list of all usable backend services in
-// the specified project in the given region.
+// ListUsable: Retrieves a list of all usable backend services in the specified
+// project in the given region.
//
// - project: Project ID for this request.
// - region: Name of the region scoping this request. It must be a string that
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index e4e4e6bf7aec5..3bfd292b39a7e 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.193.0"
+const Version = "0.201.0"
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 5ee9276a0b870..09b7f6487aa07 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -32,6 +32,11 @@
"endpointUrl": "https://storage.europe-west3.rep.googleapis.com/",
"location": "europe-west3"
},
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/",
+ "location": "europe-west8"
+ },
{
"description": "Regional Endpoint",
"endpointUrl": "https://storage.europe-west9.rep.googleapis.com/",
@@ -41,9 +46,54 @@
"description": "Regional Endpoint",
"endpointUrl": "https://storage.me-central2.rep.googleapis.com/",
"location": "me-central2"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-central1.rep.googleapis.com/",
+ "location": "us-central1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east1.rep.googleapis.com/",
+ "location": "us-east1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east4.rep.googleapis.com/",
+ "location": "us-east4"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east5.rep.googleapis.com/",
+ "location": "us-east5"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-south1.rep.googleapis.com/",
+ "location": "us-south1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west1.rep.googleapis.com/",
+ "location": "us-west1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west2.rep.googleapis.com/",
+ "location": "us-west2"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west3.rep.googleapis.com/",
+ "location": "us-west3"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west4.rep.googleapis.com/",
+ "location": "us-west4"
}
],
- "etag": "\"34373939373134303235393739323331393435\"",
+ "etag": "\"3132333635343336333933383332343134323139\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -1029,6 +1079,34 @@
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
+ "relocate": {
+ "description": "Initiates a long-running Relocate Bucket operation on the specified bucket.",
+ "httpMethod": "POST",
+ "id": "storage.buckets.relocate",
+ "parameterOrder": [
+ "bucket"
+ ],
+ "parameters": {
+ "bucket": {
+ "description": "Name of the bucket to be moved.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "b/{bucket}/relocate",
+ "request": {
+ "$ref": "RelocateBucketRequest"
+ },
+ "response": {
+ "$ref": "GoogleLongrunningOperation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
"restore": {
"description": "Restores a soft-deleted bucket.",
"httpMethod": "POST",
@@ -2829,6 +2907,11 @@
"location": "query",
"type": "string"
},
+ "restoreToken": {
+ "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.",
+ "location": "query",
+ "type": "string"
+ },
"softDeleted": {
"description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).",
"location": "query",
@@ -3304,6 +3387,11 @@
"location": "query",
"type": "string"
},
+ "restoreToken": {
+ "description": "Restore token used to differentiate sof-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.",
+ "location": "query",
+ "type": "string"
+ },
"userProject": {
"description": "The project to be billed for this request. Required for Requester Pays buckets.",
"location": "query",
@@ -3781,6 +3869,38 @@
},
"operations": {
"methods": {
+ "advanceRelocateBucket": {
+ "description": "Starts asynchronous advancement of the relocate bucket operation in the case of required write downtime, to allow it to lock the bucket at the source location, and proceed with the bucket location swap. The server makes a best effort to advance the relocate bucket operation, but success is not guaranteed.",
+ "httpMethod": "POST",
+ "id": "storage.buckets.operations.advanceRelocateBucket",
+ "parameterOrder": [
+ "bucket",
+ "operationId"
+ ],
+ "parameters": {
+ "bucket": {
+ "description": "Name of the bucket to advance the relocate for.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
+ "operationId": {
+ "description": "ID of the operation resource.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "b/{bucket}/operations/{operationId}/advanceRelocateBucket",
+ "request": {
+ "$ref": "AdvanceRelocateBucketOperationRequest"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
"cancel": {
"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.",
"httpMethod": "POST",
@@ -4136,9 +4256,26 @@
}
}
},
- "revision": "20240809",
+ "revision": "20241008",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
+ "AdvanceRelocateBucketOperationRequest": {
+ "description": "An AdvanceRelocateBucketOperation request.",
+ "id": "AdvanceRelocateBucketOperationRequest",
+ "properties": {
+ "expireTime": {
+ "description": "Specifies the time when the relocation will revert to the sync stage if the relocation hasn't succeeded.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "ttl": {
+ "description": "Specifies the duration after which the relocation will revert to the sync stage if the relocation hasn't succeeded. Optional, if not supplied, a default value of 12h will be used.",
+ "format": "google-duration",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"AnywhereCache": {
"description": "An Anywhere Cache instance.",
"id": "AnywhereCache",
@@ -4349,6 +4486,11 @@
"format": "int64",
"type": "string"
},
+ "hardDeleteTime": {
+ "description": "The hard delete time of the bucket in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
"hierarchicalNamespace": {
"description": "The bucket's hierarchical namespace configuration.",
"properties": {
@@ -4675,6 +4817,11 @@
},
"type": "object"
},
+ "softDeleteTime": {
+ "description": "The soft delete time of the bucket in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
"storageClass": {
"description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).",
"type": "string"
@@ -5587,6 +5734,10 @@
},
"type": "object"
},
+ "restoreToken": {
+ "description": "Restore token used to differentiate deleted objects with the same name and generation. This field is only returned for deleted objects in hierarchical namespace buckets.",
+ "type": "string"
+ },
"retention": {
"description": "A collection of object level retention parameters.",
"properties": {
@@ -5855,6 +6006,34 @@
},
"type": "object"
},
+ "RelocateBucketRequest": {
+ "description": "A Relocate Bucket request.",
+ "id": "RelocateBucketRequest",
+ "properties": {
+ "destinationCustomPlacementConfig": {
+ "description": "The bucket's new custom placement configuration if relocating to a Custom Dual Region.",
+ "properties": {
+ "dataLocations": {
+ "description": "The list of regional locations in which data is placed.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "destinationLocation": {
+ "description": "The new location the bucket will be relocated to.",
+ "type": "string"
+ },
+ "validateOnly": {
+ "description": "If true, validate the operation, but do not actually relocate the bucket.",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
"RewriteResponse": {
"description": "A rewrite response.",
"id": "RewriteResponse",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 8a16d773485f3..2c11b2d8d61f7 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -343,6 +343,34 @@ type ProjectsServiceAccountService struct {
s *Service
}
+// AdvanceRelocateBucketOperationRequest: An AdvanceRelocateBucketOperation
+// request.
+type AdvanceRelocateBucketOperationRequest struct {
+ // ExpireTime: Specifies the time when the relocation will revert to the sync
+ // stage if the relocation hasn't succeeded.
+ ExpireTime string `json:"expireTime,omitempty"`
+ // Ttl: Specifies the duration after which the relocation will revert to the
+ // sync stage if the relocation hasn't succeeded. Optional, if not supplied, a
+ // default value of 12h will be used.
+ Ttl string `json:"ttl,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ExpireTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ExpireTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s AdvanceRelocateBucketOperationRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod AdvanceRelocateBucketOperationRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// AnywhereCache: An Anywhere Cache instance.
type AnywhereCache struct {
// AdmissionPolicy: The cache-level entry admission policy.
@@ -463,6 +491,8 @@ type Bucket struct {
Etag string `json:"etag,omitempty"`
// Generation: The generation of this bucket.
Generation int64 `json:"generation,omitempty,string"`
+ // HardDeleteTime: The hard delete time of the bucket in RFC 3339 format.
+ HardDeleteTime string `json:"hardDeleteTime,omitempty"`
// HierarchicalNamespace: The bucket's hierarchical namespace configuration.
HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"`
// IamConfiguration: The bucket's IAM configuration.
@@ -525,6 +555,8 @@ type Bucket struct {
// of time that soft-deleted objects will be retained, and cannot be
// permanently deleted.
SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"`
+ // SoftDeleteTime: The soft delete time of the bucket in RFC 3339 format.
+ SoftDeleteTime string `json:"softDeleteTime,omitempty"`
// StorageClass: The bucket's default storage class, used whenever no
// storageClass is specified for a newly-created object. This defines how
// objects in the bucket are stored and determines the SLA and the cost of
@@ -2232,6 +2264,10 @@ type Object struct {
// Owner: The owner of the object. This will always be the uploader of the
// object.
Owner *ObjectOwner `json:"owner,omitempty"`
+ // RestoreToken: Restore token used to differentiate deleted objects with the
+ // same name and generation. This field is only returned for deleted objects in
+ // hierarchical namespace buckets.
+ RestoreToken string `json:"restoreToken,omitempty"`
// Retention: A collection of object level retention parameters.
Retention *ObjectRetention `json:"retention,omitempty"`
// RetentionExpirationTime: A server-determined value that specifies the
@@ -2635,6 +2671,59 @@ func (s PolicyBindings) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// RelocateBucketRequest: A Relocate Bucket request.
+type RelocateBucketRequest struct {
+ // DestinationCustomPlacementConfig: The bucket's new custom placement
+ // configuration if relocating to a Custom Dual Region.
+ DestinationCustomPlacementConfig *RelocateBucketRequestDestinationCustomPlacementConfig `json:"destinationCustomPlacementConfig,omitempty"`
+ // DestinationLocation: The new location the bucket will be relocated to.
+ DestinationLocation string `json:"destinationLocation,omitempty"`
+ // ValidateOnly: If true, validate the operation, but do not actually relocate
+ // the bucket.
+ ValidateOnly bool `json:"validateOnly,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "DestinationCustomPlacementConfig") to unconditionally include in API
+ // requests. By default, fields with empty or default values are omitted from
+ // API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g.
+ // "DestinationCustomPlacementConfig") to include in API requests with the JSON
+ // null value. By default, fields with empty values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
+ // more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RelocateBucketRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod RelocateBucketRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// RelocateBucketRequestDestinationCustomPlacementConfig: The bucket's new
+// custom placement configuration if relocating to a Custom Dual Region.
+type RelocateBucketRequestDestinationCustomPlacementConfig struct {
+ // DataLocations: The list of regional locations in which data is placed.
+ DataLocations []string `json:"dataLocations,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "DataLocations") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "DataLocations") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RelocateBucketRequestDestinationCustomPlacementConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod RelocateBucketRequestDestinationCustomPlacementConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// RewriteResponse: A rewrite response.
type RewriteResponse struct {
// Done: true if the copy is finished; otherwise, false if the copy is in
@@ -5326,6 +5415,109 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
return ret, nil
}
+type BucketsRelocateCall struct {
+ s *Service
+ bucket string
+ relocatebucketrequest *RelocateBucketRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Relocate: Initiates a long-running Relocate Bucket operation on the
+// specified bucket.
+//
+// - bucket: Name of the bucket to be moved.
+func (r *BucketsService) Relocate(bucket string, relocatebucketrequest *RelocateBucketRequest) *BucketsRelocateCall {
+ c := &BucketsRelocateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.relocatebucketrequest = relocatebucketrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *BucketsRelocateCall) Fields(s ...googleapi.Field) *BucketsRelocateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *BucketsRelocateCall) Context(ctx context.Context) *BucketsRelocateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *BucketsRelocateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/relocate")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.relocate" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &GoogleLongrunningOperation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
type BucketsRestoreCall struct {
s *Service
bucket string
@@ -9957,6 +10149,17 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
return c
}
+// RestoreToken sets the optional parameter "restoreToken": Restore token used
+// to differentiate soft-deleted objects with the same name and generation.
+// Only applicable for hierarchical namespace buckets and if softDeleted is set
+// to true. This parameter is optional, and is only required in the rare case
+// when there are multiple soft-deleted objects with the same name and
+// generation.
+func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall {
+ c.urlParams_.Set("restoreToken", restoreToken)
+ return c
+}
+
// SoftDeleted sets the optional parameter "softDeleted": If true, only
// soft-deleted object versions will be listed. The default is false. For more
// information, see Soft Delete
@@ -11052,6 +11255,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall {
return c
}
+// RestoreToken sets the optional parameter "restoreToken": Restore token used
+// to differentiate sof-deleted objects with the same name and generation. Only
+// applicable for hierarchical namespace buckets. This parameter is optional,
+// and is only required in the rare case when there are multiple soft-deleted
+// objects with the same name and generation.
+func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall {
+ c.urlParams_.Set("restoreToken", restoreToken)
+ return c
+}
+
// UserProject sets the optional parameter "userProject": The project to be
// billed for this request. Required for Requester Pays buckets.
func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall {
@@ -12070,6 +12283,92 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error)
return ret, nil
}
+type OperationsAdvanceRelocateBucketCall struct {
+ s *Service
+ bucket string
+ operationId string
+ advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// AdvanceRelocateBucket: Starts asynchronous advancement of the relocate
+// bucket operation in the case of required write downtime, to allow it to lock
+// the bucket at the source location, and proceed with the bucket location
+// swap. The server makes a best effort to advance the relocate bucket
+// operation, but success is not guaranteed.
+//
+// - bucket: Name of the bucket to advance the relocate for.
+// - operationId: ID of the operation resource.
+func (r *OperationsService) AdvanceRelocateBucket(bucket string, operationId string, advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest) *OperationsAdvanceRelocateBucketCall {
+ c := &OperationsAdvanceRelocateBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.operationId = operationId
+ c.advancerelocatebucketoperationrequest = advancerelocatebucketoperationrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *OperationsAdvanceRelocateBucketCall) Fields(s ...googleapi.Field) *OperationsAdvanceRelocateBucketCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *OperationsAdvanceRelocateBucketCall) Context(ctx context.Context) *OperationsAdvanceRelocateBucketCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/advanceRelocateBucket")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "operationId": c.operationId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.operations.advanceRelocateBucket" call.
+func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return gensupport.WrapError(err)
+ }
+ return nil
+}
+
type OperationsCancelCall struct {
s *Service
bucket string
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index d2a4f76645aa0..ff3539d898f05 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -247,6 +247,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
+ UniverseDomain: ds.UniverseDomain,
})
return pool, err
}
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index 2e2b15c6e0c28..d5b213e0f08df 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -130,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
+ UniverseDomain: ds.UniverseDomain,
})
if err != nil {
return nil, err
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index fe19e8f97a711..aa69fb4d509ff 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -719,6 +719,8 @@ type PythonSettings struct {
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Experimental features to be included during client library generation.
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
}
func (x *PythonSettings) Reset() {
@@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
return nil
}
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+ if x != nil {
+ return x.ExperimentalFeatures
+ }
+ return nil
+}
+
// Settings for Node client libraries.
type NodeSettings struct {
state protoimpl.MessageState
@@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string {
return nil
}
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables generation of asynchronous REST clients if `rest` transport is
+ // enabled. By default, asynchronous REST clients will not be generated.
+ // This feature will be enabled by default 1 month after launching the
+ // feature in preview packages.
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+ *x = PythonSettings_ExperimentalFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+ if x != nil {
+ return x.RestAsyncIoEnabled
+ }
+ return false
+}
+
// Describes settings to use when generating API methods that use the
// long-running operation pattern.
// All default values below are from those used in the client library
@@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct {
func (x *MethodSettings_LongRunning) Reset() {
*x = MethodSettings_LongRunning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string {
func (*MethodSettings_LongRunning) ProtoMessage() {}
func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{
0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a,
- 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
- 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
- 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
- 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
- 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01,
+ 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15,
+ 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78,
+ 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65,
+ 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41,
+ 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a,
+ 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a,
+ 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
+ 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
+ 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61,
+ 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75,
+ 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
- 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
- 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
- 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
- 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
- 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
- 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
- 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
- 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
- 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2,
- 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a,
- 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
- 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e,
- 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f,
- 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70,
- 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a,
- 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12,
- 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
- 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12,
+ 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c,
+ 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75,
+ 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50,
+ 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94,
+ 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47,
+ 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64,
+ 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f,
+ 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
+ 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61,
+ 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d,
+ 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c,
- 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65,
- 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d,
- 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
- 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61,
- 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f,
- 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69,
- 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52,
- 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09,
- 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53,
- 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f,
- 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12,
- 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a,
- 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41,
- 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
- 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a,
- 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52,
- 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43,
- 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48,
- 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f,
- 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75,
- 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69,
- 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49,
+ 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41,
+ 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10,
+ 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12,
+ 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45,
+ 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e,
+ 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e,
+ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12,
+ 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47,
+ 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74,
+ 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73,
+ 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f,
+ 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
@@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte {
}
var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
var file_google_api_client_proto_goTypes = []interface{}{
- (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
- (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
- (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
- (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
- (*Publishing)(nil), // 4: google.api.Publishing
- (*JavaSettings)(nil), // 5: google.api.JavaSettings
- (*CppSettings)(nil), // 6: google.api.CppSettings
- (*PhpSettings)(nil), // 7: google.api.PhpSettings
- (*PythonSettings)(nil), // 8: google.api.PythonSettings
- (*NodeSettings)(nil), // 9: google.api.NodeSettings
- (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
- (*RubySettings)(nil), // 11: google.api.RubySettings
- (*GoSettings)(nil), // 12: google.api.GoSettings
- (*MethodSettings)(nil), // 13: google.api.MethodSettings
- nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
- nil, // 15: google.api.DotnetSettings.RenamedServicesEntry
- nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry
- (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning
- (api.LaunchStage)(0), // 18: google.api.LaunchStage
- (*durationpb.Duration)(nil), // 19: google.protobuf.Duration
- (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions
- (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
+ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
+ (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
+ (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
+ (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
+ (*Publishing)(nil), // 4: google.api.Publishing
+ (*JavaSettings)(nil), // 5: google.api.JavaSettings
+ (*CppSettings)(nil), // 6: google.api.CppSettings
+ (*PhpSettings)(nil), // 7: google.api.PhpSettings
+ (*PythonSettings)(nil), // 8: google.api.PythonSettings
+ (*NodeSettings)(nil), // 9: google.api.NodeSettings
+ (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
+ (*RubySettings)(nil), // 11: google.api.RubySettings
+ (*GoSettings)(nil), // 12: google.api.GoSettings
+ (*MethodSettings)(nil), // 13: google.api.MethodSettings
+ nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
+ (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures
+ nil, // 16: google.api.DotnetSettings.RenamedServicesEntry
+ nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry
+ (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning
+ (api.LaunchStage)(0), // 19: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 20: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions
}
var file_google_api_client_proto_depIdxs = []int32{
1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
- 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+ 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
@@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{
2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
- 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
- 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
- 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
- 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
- 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
- 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
- 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
- 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
- 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
- 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
- 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions
- 32, // [32:32] is the sub-list for method output_type
- 32, // [32:32] is the sub-list for method input_type
- 32, // [32:32] is the sub-list for extension type_name
- 28, // [28:32] is the sub-list for extension extendee
- 0, // [0:28] is the sub-list for field type_name
+ 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+ 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+ 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+ 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+ 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+ 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+ 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+ 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+ 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+ 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+ 33, // [33:33] is the sub-list for method output_type
+ 33, // [33:33] is the sub-list for method input_type
+ 33, // [33:33] is the sub-list for extension type_name
+ 29, // [29:33] is the sub-list for extension extendee
+ 0, // [0:29] is the sub-list for field type_name
}
func init() { file_google_api_client_proto_init() }
@@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() {
return nil
}
}
- file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MethodSettings_LongRunning); i {
case 0:
return &v.state
@@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_client_proto_rawDesc,
NumEnums: 2,
- NumMessages: 16,
+ NumMessages: 17,
NumExtensions: 4,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 6a8a07781ae34..5d4096d46a048 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
-- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [canguler](https://github.com/canguler), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
index be6e108705c48..abab279379ba8 100644
--- a/vendor/google.golang.org/grpc/SECURITY.md
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy
-For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b50ce94..d7b40b7cb66f1 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f7299b..b181f386a1ba6 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
@@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
delete(m, name)
}
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
@@ -243,6 +257,10 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
+ // MetricsRecorder is the metrics recorder that balancers can use to record
+ // metrics. Balancer implementations which do not register metrics on
+ // metrics registry and record on them can ignore this field.
+ MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
@@ -410,6 +428,9 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
+ // connectedAddr contains the connected address when ConnectivityState is
+ // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
}
// ClientConnState describes the state of a ClientConn relevant to the
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a7f1eeec8e6ae..2b87bd79c7573 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
index 0adc98866c081..52f54e6a016ca 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
@@ -19,8 +19,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/lb/v1/load_balancer.proto
package grpc_lb_v1
@@ -780,7 +780,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte {
}
var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{
+var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{
(*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest
(*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest
(*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken
@@ -818,7 +818,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*LoadBalanceRequest); i {
case 0:
return &v.state
@@ -830,7 +830,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*InitialLoadBalanceRequest); i {
case 0:
return &v.state
@@ -842,7 +842,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ClientStatsPerToken); i {
case 0:
return &v.state
@@ -854,7 +854,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ClientStats); i {
case 0:
return &v.state
@@ -866,7 +866,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*LoadBalanceResponse); i {
case 0:
return &v.state
@@ -878,7 +878,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*FallbackResponse); i {
case 0:
return &v.state
@@ -890,7 +890,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*InitialLoadBalanceResponse); i {
case 0:
return &v.state
@@ -902,7 +902,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*ServerList); i {
case 0:
return &v.state
@@ -914,7 +914,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
return nil
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*Server); i {
case 0:
return &v.state
@@ -927,11 +927,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
}
}
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{
(*LoadBalanceRequest_InitialRequest)(nil),
(*LoadBalanceRequest_ClientStats)(nil),
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{
(*LoadBalanceResponse_InitialResponse)(nil),
(*LoadBalanceResponse_ServerList)(nil),
(*LoadBalanceResponse_FallbackResponse)(nil),
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
index 57a792a7b4887..84e6a25056b9b 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
@@ -19,8 +19,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/lb/v1/load_balancer.proto
package grpc_lb_v1
@@ -34,8 +34,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad"
@@ -46,7 +46,7 @@ const (
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type LoadBalancerClient interface {
// Bidirectional rpc to get a list of servers.
- BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error)
+ BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error)
}
type loadBalancerClient struct {
@@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient {
return &loadBalancerClient{cc}
}
-func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
+func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &loadBalancerBalanceLoadClient{ClientStream: stream}
+ x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream}
return x, nil
}
-type LoadBalancer_BalanceLoadClient interface {
- Send(*LoadBalanceRequest) error
- Recv() (*LoadBalanceResponse, error)
- grpc.ClientStream
-}
-
-type loadBalancerBalanceLoadClient struct {
- grpc.ClientStream
-}
-
-func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) {
- m := new(LoadBalanceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse]
// LoadBalancerServer is the server API for LoadBalancer service.
// All implementations should embed UnimplementedLoadBalancerServer
-// for forward compatibility
+// for forward compatibility.
type LoadBalancerServer interface {
// Bidirectional rpc to get a list of servers.
- BalanceLoad(LoadBalancer_BalanceLoadServer) error
+ BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error
}
-// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations.
-type UnimplementedLoadBalancerServer struct {
-}
+// UnimplementedLoadBalancerServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedLoadBalancerServer struct{}
-func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error {
+func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error {
return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented")
}
+func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {}
// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to LoadBalancerServer will
@@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface {
}
func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) {
+ // If the following call panics, it indicates UnimplementedLoadBalancerServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&LoadBalancer_ServiceDesc, srv)
}
func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream})
-}
-
-type LoadBalancer_BalanceLoadServer interface {
- Send(*LoadBalanceResponse) error
- Recv() (*LoadBalanceRequest, error)
- grpc.ServerStream
-}
-
-type loadBalancerBalanceLoadServer struct {
- grpc.ServerStream
+ return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream})
}
-func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) {
- m := new(LoadBalanceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]
// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service.
// It's only intended for direct use with grpc.RegisterService,
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
index 47a3e938dcf56..c098762741312 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
@@ -219,7 +219,7 @@ type lbBalancer struct {
// All backends addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in
// serverlist. When generating picker, a SubConn slice with the same order
- // but with only READY SCs will be gerenated.
+ // but with only READY SCs will be generated.
backendAddrsWithoutMetadata []resolver.Address
// Roundrobin functionalities.
state connectivity.State
diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go
index c248a3a83c328..ddd9bd269bf41 100644
--- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go
+++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go
@@ -112,7 +112,9 @@ type scWithRPCCount struct {
}
func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker {
- logger.Infof("least-request: Build called with info: %v", info)
+ if logger.V(2) {
+ logger.Infof("least-request: Build called with info: %v", info)
+ }
if len(info.ReadySCs) == 0 {
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 07527603f1d4e..4d69b4052f8e3 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -50,7 +50,7 @@ const (
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
@@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
- // target do not forwarrd the corresponding correct endpoints down/split
+ // target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
diff --git a/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/vendor/google.golang.org/grpc/balancer/rls/balancer.go
index 3ac28271618bb..5ae4d2e131670 100644
--- a/vendor/google.golang.org/grpc/balancer/rls/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/rls/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff"
@@ -77,6 +78,42 @@ var (
clientConnUpdateHook = func() {}
dataCachePurgeHook = func() {}
resetBackoffHook = func() {}
+
+ cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{
+ Name: "grpc.lb.rls.cache_entries",
+ Description: "EXPERIMENTAL. Number of entries in the RLS cache.",
+ Unit: "entry",
+ Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"},
+ Default: false,
+ })
+ cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{
+ Name: "grpc.lb.rls.cache_size",
+ Description: "EXPERIMENTAL. The current size of the RLS cache.",
+ Unit: "By",
+ Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"},
+ Default: false,
+ })
+ defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.rls.default_target_picks",
+ Description: "EXPERIMENTAL. Number of LB picks sent to the default target.",
+ Unit: "pick",
+ Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"},
+ Default: false,
+ })
+ targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.rls.target_picks",
+ Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.",
+ Unit: "pick",
+ Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"},
+ Default: false,
+ })
+ failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.rls.failed_picks",
+ Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.",
+ Unit: "pick",
+ Labels: []string{"grpc.target", "grpc.lb.rls.server_target"},
+ Default: false,
+ })
)
func init() {
@@ -103,7 +140,7 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.
updateCh: buffer.NewUnbounded(),
}
lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb))
- lb.dataCache = newDataCache(maxCacheSize, lb.logger)
+ lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String())
lb.bg = balancergroup.New(balancergroup.Options{
CC: cc,
BuildOpts: opts,
@@ -285,27 +322,27 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error
// Update the copy of the config in the LB policy before releasing the lock.
b.lbCfg = newCfg
-
- // Enqueue an event which will notify us when the above update has been
- // propagated to all child policies, and the child policies have all
- // processed their updates, and we have sent a picker update.
- done := make(chan struct{})
- b.updateCh.Put(resumePickerUpdates{done: done})
b.stateMu.Unlock()
- <-done
+ // We cannot do cache operations above because `cacheMu` needs to be grabbed
+ // before `stateMu` if we are to hold both locks at the same time.
+ b.cacheMu.Lock()
+ b.dataCache.updateRLSServerTarget(newCfg.lookupService)
if resizeCache {
// If the new config changes reduces the size of the data cache, we
// might have to evict entries to get the cache size down to the newly
- // specified size.
- //
- // And we cannot do this operation above (where we compute the
- // `resizeCache` boolean) because `cacheMu` needs to be grabbed before
- // `stateMu` if we are to hold both locks at the same time.
- b.cacheMu.Lock()
+ // specified size. If we do evict an entry with valid backoff timer,
+ // the new picker needs to be sent to the channel to re-process any
+ // RPCs queued as a result of this backoff timer.
b.dataCache.resize(newCfg.cacheSizeBytes)
- b.cacheMu.Unlock()
}
+ b.cacheMu.Unlock()
+ // Enqueue an event which will notify us when the above update has been
+ // propagated to all child policies, and the child policies have all
+ // processed their updates, and we have sent a picker update.
+ done := make(chan struct{})
+ b.updateCh.Put(resumePickerUpdates{done: done})
+ <-done
return nil
}
@@ -490,15 +527,19 @@ func (b *rlsBalancer) sendNewPickerLocked() {
if b.defaultPolicy != nil {
b.defaultPolicy.acquireRef()
}
+
picker := &rlsPicker{
- kbm: b.lbCfg.kbMap,
- origEndpoint: b.bopts.Target.Endpoint(),
- lb: b,
- defaultPolicy: b.defaultPolicy,
- ctrlCh: b.ctrlCh,
- maxAge: b.lbCfg.maxAge,
- staleAge: b.lbCfg.staleAge,
- bg: b.bg,
+ kbm: b.lbCfg.kbMap,
+ origEndpoint: b.bopts.Target.Endpoint(),
+ lb: b,
+ defaultPolicy: b.defaultPolicy,
+ ctrlCh: b.ctrlCh,
+ maxAge: b.lbCfg.maxAge,
+ staleAge: b.lbCfg.staleAge,
+ bg: b.bg,
+ rlsServerTarget: b.lbCfg.lookupService,
+ grpcTarget: b.bopts.Target.String(),
+ metricsRecorder: b.bopts.MetricsRecorder,
}
picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker))
state := balancer.State{
diff --git a/vendor/google.golang.org/grpc/balancer/rls/cache.go b/vendor/google.golang.org/grpc/balancer/rls/cache.go
index d7a6a1a436c6f..7fe796c9587a4 100644
--- a/vendor/google.golang.org/grpc/balancer/rls/cache.go
+++ b/vendor/google.golang.org/grpc/balancer/rls/cache.go
@@ -22,6 +22,8 @@ import (
"container/list"
"time"
+ "github.com/google/uuid"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal/backoff"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcsync"
@@ -47,7 +49,7 @@ type cacheEntry struct {
// headerData is received in the RLS response and is to be sent in the
// X-Google-RLS-Data header for matching RPCs.
headerData string
- // expiryTime is the absolute time at which this cache entry entry stops
+ // expiryTime is the absolute time at which this cache entry stops
// being valid. When an RLS request succeeds, this is set to the current
// time plus the max_age field from the LB policy config.
expiryTime time.Time
@@ -163,24 +165,39 @@ func (l *lru) getLeastRecentlyUsed() cacheKey {
//
// It is not safe for concurrent access.
type dataCache struct {
- maxSize int64 // Maximum allowed size.
- currentSize int64 // Current size.
- keys *lru // Cache keys maintained in lru order.
- entries map[cacheKey]*cacheEntry
- logger *internalgrpclog.PrefixLogger
- shutdown *grpcsync.Event
+ maxSize int64 // Maximum allowed size.
+ currentSize int64 // Current size.
+ keys *lru // Cache keys maintained in lru order.
+ entries map[cacheKey]*cacheEntry
+ logger *internalgrpclog.PrefixLogger
+ shutdown *grpcsync.Event
+ rlsServerTarget string
+
+ // Read only after initialization.
+ grpcTarget string
+ uuid string
+ metricsRecorder estats.MetricsRecorder
}
-func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache {
+func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache {
return &dataCache{
- maxSize: size,
- keys: newLRU(),
- entries: make(map[cacheKey]*cacheEntry),
- logger: logger,
- shutdown: grpcsync.NewEvent(),
+ maxSize: size,
+ keys: newLRU(),
+ entries: make(map[cacheKey]*cacheEntry),
+ logger: logger,
+ shutdown: grpcsync.NewEvent(),
+ grpcTarget: grpcTarget,
+ uuid: uuid.New().String(),
+ metricsRecorder: metricsRecorder,
}
}
+// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is
+// configured with.
+func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) {
+ dc.rlsServerTarget = rlsServerTarget
+}
+
// resize changes the maximum allowed size of the data cache.
//
// The return value indicates if an entry with a valid backoff timer was
@@ -223,7 +240,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) {
backoffCancelled = true
}
}
- dc.deleteAndcleanup(key, entry)
+ dc.deleteAndCleanup(key, entry)
}
dc.maxSize = size
return backoffCancelled
@@ -249,7 +266,7 @@ func (dc *dataCache) evictExpiredEntries() bool {
if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) {
continue
}
- dc.deleteAndcleanup(key, entry)
+ dc.deleteAndCleanup(key, entry)
evicted = true
}
return evicted
@@ -310,6 +327,8 @@ func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled
if dc.currentSize > dc.maxSize {
backoffCancelled = dc.resize(dc.maxSize)
}
+ cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid)
+ cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid)
return backoffCancelled, true
}
@@ -319,6 +338,7 @@ func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) {
dc.currentSize -= entry.size
entry.size = newSize
dc.currentSize += entry.size
+ cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid)
}
func (dc *dataCache) getEntry(key cacheKey) *cacheEntry {
@@ -339,7 +359,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) {
if !ok {
return
}
- dc.deleteAndcleanup(key, entry)
+ dc.deleteAndCleanup(key, entry)
}
// deleteAndCleanup performs actions required at the time of deleting an entry
@@ -347,15 +367,17 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) {
// - the entry is removed from the map of entries
// - current size of the data cache is update
// - the key is removed from the LRU
-func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) {
+func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) {
delete(dc.entries, key)
dc.currentSize -= entry.size
dc.keys.removeEntry(key)
+ cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid)
+ cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid)
}
func (dc *dataCache) stop() {
for key, entry := range dc.entries {
- dc.deleteAndcleanup(key, entry)
+ dc.deleteAndCleanup(key, entry)
}
dc.shutdown.Fire()
}
diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go
index 13b316b7fa233..1ab874c356fef 100644
--- a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go
+++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go
@@ -82,10 +82,3 @@ func (l *lookback) advance(t time.Time) int64 {
l.head = nh
return nh
}
-
-func min(x int64, y int64) int64 {
- if x < y {
- return x
- }
- return y
-}
diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go
index d010f74456fed..cc5ce510ad902 100644
--- a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go
+++ b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go
@@ -218,7 +218,7 @@ type matcher struct {
names []string
}
-// Equal reports if m and are are equivalent headerKeys.
+// Equal reports if m and a are equivalent headerKeys.
func (m matcher) Equal(a matcher) bool {
if m.key != a.key {
return false
diff --git a/vendor/google.golang.org/grpc/balancer/rls/picker.go b/vendor/google.golang.org/grpc/balancer/rls/picker.go
index 8f617a4e42e01..e5c86f2906875 100644
--- a/vendor/google.golang.org/grpc/balancer/rls/picker.go
+++ b/vendor/google.golang.org/grpc/balancer/rls/picker.go
@@ -29,6 +29,7 @@ import (
"google.golang.org/grpc/balancer/rls/internal/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ estats "google.golang.org/grpc/experimental/stats"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1"
"google.golang.org/grpc/metadata"
@@ -61,12 +62,15 @@ type rlsPicker struct {
// The picker is given its own copy of the below fields from the RLS LB policy
// to avoid having to grab the mutex on the latter.
- defaultPolicy *childPolicyWrapper // Child policy for the default target.
- ctrlCh *controlChannel // Control channel to the RLS server.
- maxAge time.Duration // Cache max age from LB config.
- staleAge time.Duration // Cache stale age from LB config.
- bg exitIdler
- logger *internalgrpclog.PrefixLogger
+ rlsServerTarget string
+ grpcTarget string
+ metricsRecorder estats.MetricsRecorder
+ defaultPolicy *childPolicyWrapper // Child policy for the default target.
+ ctrlCh *controlChannel // Control channel to the RLS server.
+ maxAge time.Duration // Cache max age from LB config.
+ staleAge time.Duration // Cache stale age from LB config.
+ bg exitIdler
+ logger *internalgrpclog.PrefixLogger
}
// isFullMethodNameValid return true if name is of the form `/service/method`.
@@ -85,7 +89,17 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName)
p.lb.cacheMu.Lock()
- defer p.lb.cacheMu.Unlock()
+ var pr balancer.PickResult
+ var err error
+
+ // Record metrics without the cache mutex held, to prevent lock contention
+ // between concurrent RPC's and their Pick calls. Metrics Recording can
+ // potentially be expensive.
+ metricsCallback := func() {}
+ defer func() {
+ p.lb.cacheMu.Unlock()
+ metricsCallback()
+ }()
// Lookup data cache and pending request map using request path and keys.
cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str}
@@ -98,7 +112,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
case dcEntry == nil && pendingEntry == nil:
throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "")
if throttled {
- return p.useDefaultPickIfPossible(info, errRLSThrottled)
+ pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled)
+ return pr, err
}
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
@@ -113,8 +128,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData)
}
// Delegate to child policies.
- res, err := p.delegateToChildPoliciesLocked(dcEntry, info)
- return res, err
+ pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info)
+ return pr, err
}
// We get here only if the data cache entry has expired. If entry is in
@@ -126,67 +141,108 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
// message received from the control plane is still fine, as it could be
// useful for debugging purposes.
st := dcEntry.status
- return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error())))
+ pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error())))
+ return pr, err
}
// We get here only if the entry has expired and is not in backoff.
throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "")
if throttled {
- return p.useDefaultPickIfPossible(info, errRLSThrottled)
+ pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled)
+ return pr, err
}
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
// Data cache hit. Pending request exists.
default:
if dcEntry.expiryTime.After(now) {
- res, err := p.delegateToChildPoliciesLocked(dcEntry, info)
- return res, err
+ pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info)
+ return pr, err
}
// Data cache entry has expired and pending request exists. Queue pick.
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
}
+// errToPickResult is a helper function which converts the error value returned
+// by Pick() to a string that represents the pick result.
+func errToPickResult(err error) string {
+ if err == nil {
+ return "complete"
+ }
+ if errors.Is(err, balancer.ErrNoSubConnAvailable) {
+ return "queue"
+ }
+ if _, ok := status.FromError(err); ok {
+ return "drop"
+ }
+ return "fail"
+}
+
// delegateToChildPoliciesLocked is a helper function which iterates through the
// list of child policy wrappers in a cache entry and attempts to find a child
// policy to which this RPC can be routed to. If all child policies are in
-// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily.
-func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) {
+// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns
+// a function to be invoked to record metrics.
+func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) {
const rlsDataHeaderName = "x-google-rls-data"
for i, cpw := range dcEntry.childPolicyWrappers {
state := (*balancer.State)(atomic.LoadPointer(&cpw.state))
// Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if
// it is the last one (which handles the case of delegating to the last
- // child picker if all child polcies are in TRANSIENT_FAILURE).
+ // child picker if all child policies are in TRANSIENT_FAILURE).
if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 {
// Any header data received from the RLS server is stored in the
// cache entry and needs to be sent to the actual backend in the
// X-Google-RLS-Data header.
res, err := state.Picker.Pick(info)
if err != nil {
- return res, err
+ pr := errToPickResult(err)
+ return res, func() {
+ if pr == "queue" {
+ // Don't record metrics for queued Picks.
+ return
+ }
+ targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr)
+ }, err
}
+
if res.Metadata == nil {
res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData)
} else {
res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData)
}
- return res, nil
+ return res, func() {
+ targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete")
+ }, nil
}
}
+
// In the unlikely event that we have a cache entry with no targets, we end up
// queueing the RPC.
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+ return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable
}
// useDefaultPickIfPossible is a helper method which delegates to the default
-// target if one is configured, or fails the pick with the given error.
-func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) {
+// target if one is configured, or fails the pick with the given error. Returns
+// a function to be invoked to record metrics.
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) {
if p.defaultPolicy != nil {
state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state))
- return state.Picker.Pick(info)
+ res, err := state.Picker.Pick(info)
+ pr := errToPickResult(err)
+ return res, func() {
+ if pr == "queue" {
+ // Don't record metrics for queued Picks.
+ return
+ }
+ defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr)
+ }, err
}
- return balancer.PickResult{}, errOnNoDefault
+
+ return balancer.PickResult{}, func() {
+ failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget)
+ }, errOnNoDefault
}
// sendRouteLookupRequestLocked adds an entry to the pending request map and
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
index 36606e79e4442..88bf64ec4ec49 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
@@ -32,7 +32,9 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/balancer/weightedroundrobin/internal"
+ "google.golang.org/grpc/balancer/weightedtarget"
"google.golang.org/grpc/connectivity"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal/grpclog"
iserviceconfig "google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/orca"
@@ -45,6 +47,43 @@ import (
// Name is the name of the weighted round robin balancer.
const Name = "weighted_round_robin"
+var (
+ rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.rr_fallback",
+ Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.",
+ Unit: "update",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+
+ endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ Description: "EXPERIMENTAL. Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).",
+ Unit: "endpoint",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+
+ endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.endpoint_weight_stale",
+ Description: "EXPERIMENTAL. Number of endpoints from each scheduler update whose latest weight is older than the expiration period.",
+ Unit: "endpoint",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+ endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.endpoint_weights",
+ Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.",
+ Unit: "endpoint",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+)
+
func init() {
balancer.Register(bb{})
}
@@ -58,7 +97,10 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba
csEvltr: &balancer.ConnectivityStateEvaluator{},
scMap: make(map[balancer.SubConn]*weightedSubConn),
connectivityState: connectivity.Connecting,
+ target: bOpts.Target.String(),
+ metricsRecorder: bOpts.MetricsRecorder,
}
+
b.logger = prefixLogger(b)
b.logger.Infof("Created")
return b
@@ -101,8 +143,11 @@ func (bb) Name() string {
// wrrBalancer implements the weighted round robin LB policy.
type wrrBalancer struct {
- cc balancer.ClientConn
- logger *grpclog.PrefixLogger
+ // The following fields are immutable.
+ cc balancer.ClientConn
+ logger *grpclog.PrefixLogger
+ target string
+ metricsRecorder estats.MetricsRecorder
// The following fields are only accessed on calls into the LB policy, and
// do not need a mutex.
@@ -114,6 +159,7 @@ type wrrBalancer struct {
resolverErr error // the last error reported by the resolver; cleared on successful resolution
connErr error // the last connection error; cleared upon leaving TransientFailure
stopPicker func()
+ locality string
}
func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
@@ -125,6 +171,7 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error
}
b.cfg = cfg
+ b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState)
b.updateAddresses(ccs.ResolverState.Addresses)
if len(ccs.ResolverState.Addresses) == 0 {
@@ -171,6 +218,10 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) {
// Initially, we set load reports to off, because they are not
// running upon initial weightedSubConn creation.
cfg: &lbConfig{EnableOOBLoadReport: false},
+
+ metricsRecorder: b.metricsRecorder,
+ target: b.target,
+ locality: b.locality,
}
b.subConns.Set(addr, wsc)
b.scMap[sc] = wsc
@@ -318,9 +369,12 @@ func (b *wrrBalancer) regeneratePicker() {
}
p := &picker{
- v: rand.Uint32(), // start the scheduler at a random point
- cfg: b.cfg,
- subConns: b.readySubConns(),
+ v: rand.Uint32(), // start the scheduler at a random point
+ cfg: b.cfg,
+ subConns: b.readySubConns(),
+ metricsRecorder: b.metricsRecorder,
+ locality: b.locality,
+ target: b.target,
}
var ctx context.Context
ctx, b.stopPicker = context.WithCancel(context.Background())
@@ -339,16 +393,20 @@ type picker struct {
v uint32 // incrementing value used by the scheduler; accessed atomically
cfg *lbConfig // active config when picker created
subConns []*weightedSubConn // all READY subconns
+
+ // The following fields are immutable.
+ target string
+ locality string
+ metricsRecorder estats.MetricsRecorder
}
-// scWeights returns a slice containing the weights from p.subConns in the same
-// order as p.subConns.
-func (p *picker) scWeights() []float64 {
+func (p *picker) scWeights(recordMetrics bool) []float64 {
ws := make([]float64, len(p.subConns))
now := internal.TimeNow()
for i, wsc := range p.subConns {
- ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod))
+ ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics)
}
+
return ws
}
@@ -357,7 +415,7 @@ func (p *picker) inc() uint32 {
}
func (p *picker) regenerateScheduler() {
- s := newScheduler(p.scWeights(), p.inc)
+ s := p.newScheduler(true)
atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s))
}
@@ -367,6 +425,7 @@ func (p *picker) start(ctx context.Context) {
// No need to regenerate weights with only one backend.
return
}
+
go func() {
ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod))
defer ticker.Stop()
@@ -381,7 +440,7 @@ func (p *picker) start(ctx context.Context) {
}()
}
-func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
// Read the scheduler atomically. All scheduler operations are threadsafe,
// and if the scheduler is replaced during this usage, we want to use the
// scheduler that was live when the pick started.
@@ -404,8 +463,12 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
// When needed, it also tracks connectivity state, listens for metrics updates
// by implementing the orca.OOBListener interface and manages that listener.
type weightedSubConn struct {
+ // The following fields are immutable.
balancer.SubConn
- logger *grpclog.PrefixLogger
+ logger *grpclog.PrefixLogger
+ target string
+ metricsRecorder estats.MetricsRecorder
+ locality string
// The following fields are only accessed on calls into the LB policy, and
// do not need a mutex.
@@ -450,7 +513,7 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) {
}
w.lastUpdated = internal.TimeNow()
- if w.nonEmptySince == (time.Time{}) {
+ if w.nonEmptySince.Equal(time.Time{}) {
w.nonEmptySince = w.lastUpdated
}
}
@@ -495,14 +558,17 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect
w.SubConn.Connect()
case connectivity.Ready:
// If we transition back to READY state, reset nonEmptySince so that we
- // apply the blackout period after we start receiving load data. Note
- // that we cannot guarantee that we will never receive lingering
- // callbacks for backend metric reports from the previous connection
- // after the new connection has been established, but they should be
- // masked by new backend metric reports from the new connection by the
- // time the blackout period ends.
+ // apply the blackout period after we start receiving load data. Also
+ // reset lastUpdated to trigger endpoint weight not yet usable in the
+ // case endpoint gets asked what weight it is before receiving a new
+ // load report. Note that we cannot guarantee that we will never receive
+ // lingering callbacks for backend metric reports from the previous
+ // connection after the new connection has been established, but they
+ // should be masked by new backend metric reports from the new
+ // connection by the time the blackout period ends.
w.mu.Lock()
w.nonEmptySince = time.Time{}
+ w.lastUpdated = time.Time{}
w.mu.Unlock()
case connectivity.Shutdown:
if w.stopORCAListener != nil {
@@ -527,21 +593,44 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect
// weight returns the current effective weight of the subconn, taking into
// account the parameters. Returns 0 for blacked out or expired data, which
-// will cause the backend weight to be treated as the mean of the weights of
-// the other backends.
-func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 {
+// will cause the backend weight to be treated as the mean of the weights of the
+// other backends. If forScheduler is set to true, this function will emit
+// metrics through the metrics registry.
+func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) {
w.mu.Lock()
defer w.mu.Unlock()
+
+ if recordMetrics {
+ defer func() {
+ endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality)
+ }()
+ }
+
+ // The SubConn has not received a load report (i.e. just turned READY with
+ // no load report).
+ if w.lastUpdated.Equal(time.Time{}) {
+ endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality)
+ return 0
+ }
+
// If the most recent update was longer ago than the expiration period,
// reset nonEmptySince so that we apply the blackout period again if we
// start getting data again in the future, and return 0.
if now.Sub(w.lastUpdated) >= weightExpirationPeriod {
+ if recordMetrics {
+ endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality)
+ }
w.nonEmptySince = time.Time{}
return 0
}
+
// If we don't have at least blackoutPeriod worth of data, return 0.
- if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) {
+ if blackoutPeriod != 0 && (w.nonEmptySince.Equal(time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) {
+ if recordMetrics {
+ endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality)
+ }
return 0
}
+
return w.weightVal
}
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
index f389678b4e825..56aa15da10d27 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
@@ -31,13 +31,17 @@ type scheduler interface {
// len(scWeights)-1 are zero or there is only a single subconn, otherwise it
// will return an Earliest Deadline First (EDF) scheduler implementation that
// selects the subchannels according to their weights.
-func newScheduler(scWeights []float64, inc func() uint32) scheduler {
+func (p *picker) newScheduler(recordMetrics bool) scheduler {
+ scWeights := p.scWeights(recordMetrics)
n := len(scWeights)
if n == 0 {
return nil
}
if n == 1 {
- return &rrScheduler{numSCs: 1, inc: inc}
+ if recordMetrics {
+ rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality)
+ }
+ return &rrScheduler{numSCs: 1, inc: p.inc}
}
sum := float64(0)
numZero := 0
@@ -51,8 +55,12 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler {
numZero++
}
}
+
if numZero >= n-1 {
- return &rrScheduler{numSCs: uint32(n), inc: inc}
+ if recordMetrics {
+ rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality)
+ }
+ return &rrScheduler{numSCs: uint32(n), inc: p.inc}
}
unscaledMean := sum / float64(n-numZero)
scalingFactor := maxWeight / max
@@ -74,11 +82,11 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler {
}
if allEqual {
- return &rrScheduler{numSCs: uint32(n), inc: inc}
+ return &rrScheduler{numSCs: uint32(n), inc: p.inc}
}
logger.Infof("using edf scheduler with weights: %v", weights)
- return &edfScheduler{weights: weights, inc: inc}
+ return &edfScheduler{weights: weights, inc: p.inc}
}
const maxWeight = math.MaxUint16
diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go
index 27279257ed138..bcc8aca8b4912 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go
@@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr
}
// Start starts the aggregator. It can be called after Stop to restart the
-// aggretator.
+// aggregator.
func (wbsa *Aggregator) Start() {
wbsa.mu.Lock()
defer wbsa.mu.Unlock()
diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
index 220f4e555674a..dfd1ef26dcb02 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
@@ -84,6 +84,17 @@ type weightedTargetBalancer struct {
targets map[string]Target
}
+type localityKeyType string
+
+const localityKey = localityKeyType("locality")
+
+// LocalityFromResolverState returns the locality from the resolver.State
+// provided, or an empty string if not present.
+func LocalityFromResolverState(state resolver.State) string {
+ locality, _ := state.Attributes.Value(localityKey).(string)
+ return locality
+}
+
// UpdateClientConnState takes the new targets in balancer group,
// creates/deletes sub-balancers and sends them update. addresses are split into
// groups based on hierarchy path.
@@ -142,7 +153,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat
ResolverState: resolver.State{
Addresses: addressesSplit[name],
ServiceConfig: s.ResolverState.ServiceConfig,
- Attributes: s.ResolverState.Attributes,
+ Attributes: s.ResolverState.Attributes.WithValue(localityKey, name),
},
BalancerConfig: newT.ChildPolicy.Config,
})
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47a8b1..8ad6ce2f0950a 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -25,12 +25,15 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
+ MetricsRecorder: cc.metricsRecorderList,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
@@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ uccs := func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
@@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
- })
- if !ok {
- return nil
}
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the MaybeSchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
return <-errCh
}
// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() {
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
+ ccb.serializer.TrySchedule(func(context.Context) {
if ccb.balancer == nil {
return
}
@@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() {
// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return acbw, nil
}
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}
@@ -252,15 +262,29 @@ type acBalancerWrapper struct {
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
- acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ acbw.stateListener(scs)
+ acbw.ac.mu.Lock()
+ defer acbw.ac.mu.Unlock()
+ if s == connectivity.Ready {
+ // When changing states to READY, reset stateReadyChan. Wait until
+ // after we notify the LB policy's listener(s) in order to prevent
+ // ac.getTransport() from unblocking before the LB policy starts
+ // tracking the subchannel as READY.
+ close(acbw.ac.stateReadyChan)
+ acbw.ac.stateReadyChan = make(chan struct{})
+ }
})
}
@@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
acbw.producers[pb] = pData
}
// Account for this new reference.
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 63c639e4fe933..55bffaa77ef0f 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GrpcLogEntry); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ClientHeader); i {
case 0:
return &v.state
@@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ServerHeader); i {
case 0:
return &v.state
@@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Trailer); i {
case 0:
return &v.state
@@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Message); i {
case 0:
return &v.state
@@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Metadata); i {
case 0:
return &v.state
@@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*MetadataEntry); i {
case 0:
return &v.state
@@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Address); i {
case 0:
return &v.state
@@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
}
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 423be7b43b00c..9c8850e3fdd5b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -39,6 +40,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
return cc, nil
}
@@ -590,13 +595,14 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
- authority string // See initAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
- idlenessMgr *idle.Manager
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -626,11 +632,6 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
cc.csMgr.updateState(connectivity.TransientFailure)
}
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
return out
}
@@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
}
ac := &addrConn{
- state: connectivity.Idle,
- cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
- scopts: opts,
- dopts: cc.dopts,
- channelz: channelz.RegisterSubChannel(cc.channelz, ""),
- resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
+ state: connectivity.Idle,
+ cc: cc,
+ addrs: copyAddresses(addrs),
+ scopts: opts,
+ dopts: cc.dopts,
+ channelz: channelz.RegisterSubChannel(cc.channelz, ""),
+ resetBackoff: make(chan struct{}),
+ stateReadyChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
@@ -918,28 +908,29 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true is a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ addrs = copyAddresses(addrs)
limit := len(addrs)
if limit > 5 {
limit = 5
@@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
ac.mu.Lock()
- if equalAddresses(ac.addrs, addrs) {
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
- ac.mu.Unlock()
-
// Since we were connecting/connected, we should start a new connection
// attempt.
- go ac.resetTransport()
+ go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@@ -1190,8 +1179,8 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
- stateChan chan struct{} // closed and recreated on every state change.
+ state connectivity.State
+ stateReadyChan chan struct{} // closed and recreated on every READY state change.
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
- // When changing states, reset the state change channel.
- close(ac.stateChan)
- ac.stateChan = make(chan struct{})
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.acbw.updateState(s, lastErr)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
-func (ac *addrConn) resetTransport() {
- ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
for ctx.Err() == nil {
ac.mu.Lock()
- t, state, sc := ac.transport, ac.state, ac.stateChan
+ t, state, sc := ac.transport, ac.state, ac.stateReadyChan
ac.mu.Unlock()
if state == connectivity.Ready {
return t, nil
@@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancelation of cc.ctx.
+ // closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd47ccd..e840858b77b18 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the register in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). Initially checks the V2 registry with encoding.GetCodecV2 and
+// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry
+// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge
+// to turn it into an encoding.CodecV2. Returns nil otherwise.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
index 43726e877b8b3..7e4bfee888615 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {
// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
// The key argument should be 44 bytes, the first 32 bytes are used as a key
-// for HKDF-expand and the remainining 12 bytes are used as a random mask for
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
// the counter.
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
k := len(key)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
index 6a9035ea254f8..b5bbb5497aa39 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
@@ -51,7 +51,7 @@ type aes128gcmRekey struct {
// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
-// are used as a key for HKDF-expand and the remainining 12 bytes are used
+// are used as a key for HKDF-expand and the remaining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
index 0d64fb37a1255..f1ea7bb208119 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
@@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) {
}
return n, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
index 6c867dd850153..50721f690acbf 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -128,7 +128,7 @@ type altsHandshaker struct {
// NewClientHandshaker creates a core.Handshaker that performs a client-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
return &altsHandshaker{
stream: nil,
conn: c,
@@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
// NewServerHandshaker creates a core.Handshaker that performs a server-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
return &altsHandshaker{
stream: nil,
conn: c,
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
index e1cdafb980cd8..b3af035907292 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -34,8 +34,6 @@ var (
// to a corresponding connection to a hypervisor handshaker service
// instance.
hsConnMap = make(map[string]*grpc.ClientConn)
- // hsDialer will be reassigned in tests.
- hsDialer = grpc.Dial
)
// Dial dials the handshake service in the hypervisor. If a connection has
@@ -50,7 +48,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) {
// Create a new connection to the handshaker service. Note that
// this connection stays open until the application is closed.
var err error
- hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return nil, err
}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
index 38cb5cf0d744c..b7de8f05b7637 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/gcp/altscontext.proto
package grpc_gcp
@@ -201,7 +201,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte {
}
var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{
+var file_grpc_gcp_altscontext_proto_goTypes = []any{
(*AltsContext)(nil), // 0: grpc.gcp.AltsContext
nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry
(SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel
@@ -225,7 +225,7 @@ func file_grpc_gcp_altscontext_proto_init() {
}
file_grpc_gcp_transport_security_common_proto_init()
if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*AltsContext); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
index 55fc7f65f10d4..79b5dad476c7f 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/gcp/handshaker.proto
package grpc_gcp
@@ -533,7 +533,7 @@ type StartServerHandshakeReq struct {
// to handshake_parameters is the integer value of HandshakeProtocol enum.
HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
- // that the peer's out_frames are split into multiple HandshakReq messages.
+ // that the peer's out_frames are split into multiple HandshakeReq messages.
InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
// (Optional) Local endpoint information of the connection to the client,
// such as local IP address, port number, and network protocol.
@@ -1071,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10,
0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
- 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b,
0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
@@ -1108,139 +1108,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69,
0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61,
- 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72,
- 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72,
- 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65,
- 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74,
+ 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01,
+ 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf,
+ 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15,
+ 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52,
- 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
- 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64,
- 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f,
- 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
- 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
- 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63,
- 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b,
- 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
- 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20,
+ 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e,
+ 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
+ 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45,
+ 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e,
+ 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d,
+ 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48,
+ 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
+ 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c,
+ 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63,
+ 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a,
+ 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46,
+ 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73,
+ 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37,
+ 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64,
+ 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48,
+ 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61,
+ 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65,
+ 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e,
+ 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72,
+ 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
+ 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74,
- 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62,
- 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f,
- 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42,
- 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
- 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
- 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79,
- 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
- 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73,
- 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
- 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
- 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65,
- 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a,
- 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48,
- 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
- 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61,
- 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
- 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b,
- 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12,
- 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
- 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65,
- 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e,
- 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72,
- 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73,
- 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72,
- 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63,
- 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
- 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a,
- 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
- 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63,
- 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12,
- 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f,
- 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04,
- 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
- 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54,
- 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e,
- 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54,
- 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a,
- 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e,
- 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70,
- 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
- 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63,
- 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
- 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74,
+ 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72,
+ 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46,
+ 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64,
+ 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e,
+ 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02,
+ 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50,
+ 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07,
+ 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73,
+ 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b,
+ 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
+ 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
+ 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48,
+ 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+ 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63,
+ 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1257,7 +1258,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte {
var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
-var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{
+var file_grpc_gcp_handshaker_proto_goTypes = []any{
(HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol
(NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol
(*Endpoint)(nil), // 2: grpc.gcp.Endpoint
@@ -1313,7 +1314,7 @@ func file_grpc_gcp_handshaker_proto_init() {
}
file_grpc_gcp_transport_security_common_proto_init()
if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Endpoint); i {
case 0:
return &v.state
@@ -1325,7 +1326,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Identity); i {
case 0:
return &v.state
@@ -1337,7 +1338,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*StartClientHandshakeReq); i {
case 0:
return &v.state
@@ -1349,7 +1350,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ServerHandshakeParameters); i {
case 0:
return &v.state
@@ -1361,7 +1362,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*StartServerHandshakeReq); i {
case 0:
return &v.state
@@ -1373,7 +1374,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*NextHandshakeMessageReq); i {
case 0:
return &v.state
@@ -1385,7 +1386,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*HandshakerReq); i {
case 0:
return &v.state
@@ -1397,7 +1398,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*HandshakerResult); i {
case 0:
return &v.state
@@ -1409,7 +1410,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*HandshakerStatus); i {
case 0:
return &v.state
@@ -1421,7 +1422,7 @@ func file_grpc_gcp_handshaker_proto_init() {
return nil
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*HandshakerResp); i {
case 0:
return &v.state
@@ -1434,12 +1435,12 @@ func file_grpc_gcp_handshaker_proto_init() {
}
}
}
- file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{
(*Identity_ServiceAccount)(nil),
(*Identity_Hostname)(nil),
}
- file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{}
- file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{
+ file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{}
+ file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{
(*HandshakerReq_ClientStart)(nil),
(*HandshakerReq_ServerStart)(nil),
(*HandshakerReq_Next)(nil),
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
index 358074b64946a..34443b1d2dcfd 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/gcp/handshaker.proto
package grpc_gcp
@@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe
// HandshakerServiceServer is the server API for HandshakerService service.
// All implementations must embed UnimplementedHandshakerServiceServer
-// for forward compatibility
+// for forward compatibility.
type HandshakerServiceServer interface {
// Handshaker service accepts a stream of handshaker request, returning a
// stream of handshaker response. Client is expected to send exactly one
@@ -87,14 +87,18 @@ type HandshakerServiceServer interface {
mustEmbedUnimplementedHandshakerServiceServer()
}
-// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedHandshakerServiceServer struct {
-}
+// UnimplementedHandshakerServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHandshakerServiceServer struct{}
func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error {
return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented")
}
func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {}
+func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {}
// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HandshakerServiceServer will
@@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface {
}
func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) {
+ // If the following call panics, it indicates UnimplementedHandshakerServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&HandshakerService_ServiceDesc, srv)
}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
index 18cc9cfbd5993..6956c14f6a987 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/gcp/transport_security_common.proto
package grpc_gcp
@@ -253,7 +253,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte {
var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{
+var file_grpc_gcp_transport_security_common_proto_goTypes = []any{
(SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel
(*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions
(*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version
@@ -274,7 +274,7 @@ func file_grpc_gcp_transport_security_common_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*RpcProtocolVersions); i {
case 0:
return &v.state
@@ -286,7 +286,7 @@ func file_grpc_gcp_transport_security_common_proto_init() {
return nil
}
}
- file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*RpcProtocolVersions_Version); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 82bee1443bfee..4c805c64462c9 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
-func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
index d475cbc0894c0..328b838ed1f6a 100644
--- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -38,7 +38,7 @@ type TokenSource struct {
}
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
token, err := ts.Token()
if err != nil {
return nil, err
@@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
return oauthAccess{token: *token}
}
-func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
ri, _ := credentials.RequestInfoFromContext(ctx)
if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err)
@@ -156,7 +156,7 @@ type serviceAccount struct {
t *oauth2.Token
}
-func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.t.Valid() {
diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
index 87528b4e23e74..a4b99e3d4a2e9 100644
--- a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
+++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go
@@ -58,7 +58,7 @@ type wrappedProvider struct {
// closedProvider always returns errProviderClosed error.
type closedProvider struct{}
-func (c closedProvider) KeyMaterial(ctx context.Context) (*KeyMaterial, error) {
+func (c closedProvider) KeyMaterial(context.Context) (*KeyMaterial, error) {
return nil, errProviderClosed
}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index f5453d48a53f3..2b285beee376b 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -33,6 +33,7 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
@@ -60,7 +61,7 @@ func init() {
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithRecvBufferPool = withRecvBufferPool
+ internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -92,7 +93,6 @@ type dialOptions struct {
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
- recvBufferPool SharedBufferPool
defaultScheme string
maxCallAttempts int
}
@@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption {
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions {
WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
bs: internalbackoff.DefaultExponential,
healthCheckFunc: internal.HealthCheckFunc,
idleTimeout: 30 * time.Minute,
- recvBufferPool: nopBufferPool{},
defaultScheme: "dns",
maxCallAttempts: defaultMaxCallAttempts,
}
@@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption {
})
}
-// WithRecvBufferPool returns a DialOption that configures the ClientConn
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
- return withRecvBufferPool(bufferPool)
-}
-
-func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.recvBufferPool = bufferPool
+ o.copts.BufferPool = bufferPool
})
}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 0022859ad7465..e7b532b6f806f 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 5ebf88d7147f2..11d0ae142c429 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -94,7 +94,7 @@ type Codec interface {
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 0000000000000..074c5e234a7b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03ec58..ceec319dd2fb4 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"fmt"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)
@@ -32,28 +33,51 @@ import (
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codec is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
vv := messageV2Of(v)
if vv == nil {
- return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ size := proto.Size(vv)
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
}
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
vv := messageV2Of(v)
if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
- return proto.Unmarshal(data, vv)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
}
func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
return nil
}
-func (codec) Name() string {
+func (c *codecV2) Name() string {
return Name
}
diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go
deleted file mode 100644
index de7f13a2210ef..0000000000000
--- a/vendor/google.golang.org/grpc/experimental/experimental.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package experimental is a collection of experimental features that might
-// have some rough edges to them. Housing experimental features in this package
-// results in a user accessing these APIs as `experimental.Foo`, thereby making
-// it explicit that the feature is experimental and using them in production
-// code is at their own risk.
-//
-// All APIs in this package are experimental.
-package experimental
-
-import (
- "google.golang.org/grpc"
- "google.golang.org/grpc/internal"
-)
-
-// WithRecvBufferPool returns a grpc.DialOption that configures the use of
-// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on
-// the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize
-// one, begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the
-// following options are used: WithStatsHandler, EnableTracing, or binary
-// logging. In such cases, the shared buffer pool will be ignored.
-//
-// Note: It is not recommended to use the shared buffer pool when compression is
-// enabled.
-func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption {
- return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool)
-}
-
-// RecvBufferPool returns a grpc.ServerOption that configures the server to use
-// the provided shared buffer pool for parsing incoming messages. Depending on
-// the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize
-// one, begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the
-// following options are used: StatsHandler, EnableTracing, or binary logging.
-// In such cases, the shared buffer pool will be ignored.
-//
-// Note: It is not recommended to use the shared buffer pool when compression is
-// enabled.
-func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption {
- return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool)
-}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000000000..1d827dd5d9d41
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name Metric
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+ // The required label keys for this metric. These are intended to
+ // metrics emitted from a stats handler.
+ Labels []string
+ // The optional label keys for this metric. These are intended to attached
+ // to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for a int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+ return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+ if registeredMetrics[name] {
+ logger.Fatalf("metric %v already registered", name)
+ }
+ registeredMetrics[name] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(name)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[Metric]bool)
+ metricsRegistry = make(map[Metric]*MetricDescriptor)
+ maps.Copy(registeredMetrics, registeredMetrics)
+ maps.Copy(metricsRegistry, metricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000000000..3221f7a633a37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats API's.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable,
+// however Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing Metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+ return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ delete(newMetrics, metric)
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced2553..f1ae080dcb816 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.FatalDepth(depth+1, args...)
+ FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb993c..db320105e64e2 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
import (
"os"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
func init() {
@@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return grpclog.Logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
- grpclog.Logger.Warning(args...)
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
- grpclog.Logger.Warningf(format, args...)
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
- grpclog.Logger.Warningln(args...)
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
- grpclog.Logger.Error(args...)
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
- grpclog.Logger.Errorf(format, args...)
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
- grpclog.Logger.Errorln(args...)
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
- grpclog.Logger.Fatal(args...)
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
- grpclog.Logger.Fatalf(format, args...)
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
- grpclog.Logger.Fatalln(args...)
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 0000000000000..59c03bc14c2a9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000000000..e524fdd40b236
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose level.
+ return true
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102ab245..07df71e98a87a 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
*
*/
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
"os"
)
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
// LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
@@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
+ LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.output(fatalLog, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.output(fatalLog, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.output(fatalLog, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ var m []*log.Logger
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+ m = append(m, log.New(infoW, "", flag))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+ ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+ m = append(m, log.New(ew, "", flag))
+ m = append(m, log.New(ew, "", flag))
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index b1674d8267ca4..4b203585707af 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,70 +18,17 @@
package grpclog
-import "google.golang.org/grpc/internal/grpclog"
+import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- grpclog.Logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index ecfd36d713032..892dc13d164b9 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,52 +19,16 @@
package grpclog
import (
- "encoding/json"
- "fmt"
"io"
- "log"
"os"
"strconv"
"strings"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
- grpclog.Logger = l
- grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
-}
-
-type loggerV2Config struct {
- verbose int
- jsonFormat bool
-}
-
-func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
- var m []*log.Logger
- flag := log.LstdFlags
- if c.jsonFormat {
- flag = 0
- }
- m = append(m, log.New(infoW, "", flag))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, "", flag))
- m = append(m, log.New(ew, "", flag))
- return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
- verbose: v,
- jsonFormat: jsonFormat,
- })
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
})
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
- g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.output(fatalLog, fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.output(fatalLog, fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.output(fatalLog, fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
@@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
+type DepthLoggerV2 internal.DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 38b8835073502..d92335445f650 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
@@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() {
return nil
}
}
- file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckResponse); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index 51b736ba06e5f..f96b8ab4927e1 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@@ -73,7 +73,7 @@ type HealthClient interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
}
type healthClient struct {
@@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &healthWatchClient{ClientStream: stream}
+ x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts .
return x, nil
}
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
-// for forward compatibility
+// for forward compatibility.
//
// Health is gRPC's mechanism for checking whether a server is able to handle
// RPCs. Its semantics are documented in
@@ -160,19 +146,23 @@ type HealthServer interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
+ Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
}
-// UnimplementedHealthServer should be embedded to have forward compatible implementations.
-type UnimplementedHealthServer struct {
-}
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
-func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HealthServer will
@@ -182,6 +172,13 @@ type UnsafeHealthServer interface {
}
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ // If the following call panics, it indicates UnimplementedHealthServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&Health_ServiceDesc, srv)
}
@@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
-}
-
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
+ return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
}
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
// It's only intended for direct use with grpc.RegisterService,
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
index cce6312d77f9c..d4b4b7081590e 100644
--- a/vendor/google.golang.org/grpc/health/server.go
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -51,7 +51,7 @@ func NewServer() *Server {
}
// Check implements `service Health`.
-func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.RLock()
defer s.mu.RUnlock()
if servingStatus, ok := s.statusMap[in.Service]; ok {
diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
index 5496b99dd5c45..31c9cdc9d026b 100644
--- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
+++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go
@@ -99,7 +99,7 @@ func (sbc *subBalancerWrapper) startBalancer() {
if sbc.balancer == nil {
sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts)
}
- sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id)
+ sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id)
sbc.balancer.SwitchTo(sbc.builder)
if sbc.ccState != nil {
sbc.balancer.UpdateClientConnState(*sbc.ccState)
@@ -121,14 +121,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState)
sbc.ccState = &s
b := sbc.balancer
if b == nil {
- // This sub-balancer was closed. This should never happen because
- // sub-balancers are closed when the locality is removed from EDS, or
- // the balancer group is closed. There should be no further address
- // updates when either of this happened.
- //
- // This will be a common case with priority support, because a
- // sub-balancer (and the whole balancer group) could be closed because
- // it's the lower priority, but it can still get address updates.
+ // A sub-balancer is closed when it is removed from the group or the
+ // group is closed as a whole, and is not expected to receive updates
+ // after that. But when used with the priority LB policy a sub-balancer
+ // (and the whole balancer group) could be closed because it's the lower
+ // priority, but it can still get address updates.
return nil
}
return b.UpdateClientConnState(s)
@@ -137,14 +134,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState)
func (sbc *subBalancerWrapper) resolverError(err error) {
b := sbc.balancer
if b == nil {
- // This sub-balancer was closed. This should never happen because
- // sub-balancers are closed when the locality is removed from EDS, or
- // the balancer group is closed. There should be no further address
- // updates when either of this happened.
- //
- // This will be a common case with priority support, because a
- // sub-balancer (and the whole balancer group) could be closed because
- // it's the lower priority, but it can still get address updates.
+ // A sub-balancer is closed when it is removed from the group or the
+ // group is closed as a whole, and is not expected to receive updates
+ // after that. But when used with the priority LB policy a sub-balancer
+ // (and the whole balancer group) could be closed because it's the lower
+ // priority, but it can still get address updates.
return
}
b.ResolverError(err)
@@ -210,7 +204,7 @@ type BalancerGroup struct {
// after it's closed.
//
// We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer
- // may call back to balancer group inline. It causes deaclock if they
+ // may call back to balancer group inline. It causes deadlock if they
// require the same mutex).
//
// We should never need to hold multiple locks at the same time in this
@@ -224,7 +218,7 @@ type BalancerGroup struct {
// guards the map from SubConn to balancer ID, so updateSubConnState needs
// to hold it shortly to potentially delete from the map.
//
- // UpdateState is called by the balancer state aggretator, and it will
+ // UpdateState is called by the balancer state aggregator, and it will
// decide when and whether to call.
//
// The corresponding boolean incomingStarted is used to stop further updates
@@ -298,11 +292,11 @@ func (bg *BalancerGroup) Start() {
// AddWithClientConn adds a balancer with the given id to the group. The
// balancer is built with a balancer builder registered with balancerName. The
// given ClientConn is passed to the newly built balancer instead of the
-// onepassed to balancergroup.New().
+// one passed to balancergroup.New().
//
// TODO: Get rid of the existing Add() API and replace it with this.
func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
- bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id)
+ bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id)
builder := balancer.Get(balancerName)
if builder == nil {
return fmt.Errorf("unregistered balancer name %q", balancerName)
@@ -318,7 +312,7 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.
if bg.outgoingStarted && bg.deletedBalancerCache != nil {
if old, ok := bg.deletedBalancerCache.Remove(id); ok {
if bg.logger.V(2) {
- bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id)
+ bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}
@@ -372,13 +366,13 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) {
// closed after timeout. Cleanup work (closing sub-balancer and removing
// subconns) will be done after timeout.
func (bg *BalancerGroup) Remove(id string) {
- bg.logger.Infof("Removing child policy for locality %q", id)
+ bg.logger.Infof("Removing child policy for child %q", id)
bg.outgoingMu.Lock()
sbToRemove, ok := bg.idToBalancerConfig[id]
if !ok {
- bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id)
+ bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id)
bg.outgoingMu.Unlock()
return
}
@@ -394,13 +388,13 @@ func (bg *BalancerGroup) Remove(id string) {
if bg.deletedBalancerCache != nil {
if bg.logger.V(2) {
- bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id)
+ bg.logger.Infof("Adding child policy for child %q to the balancer cache", id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}
bg.deletedBalancerCache.Add(id, sbToRemove, func() {
if bg.logger.V(2) {
- bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id)
+ bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}
@@ -541,7 +535,7 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver
// aggregator will create an aggregated picker and an aggregated connectivity
// state, then forward to ClientConn.
func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
- bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state)
+ bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state)
// Send new state to the aggregator, without holding the incomingMu.
// incomingMu is to protect all calls to the parent ClientConn, this update
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index aa4505a871dfb..9669328914ad4 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
index dfe18b08925d9..64c791953d017 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -46,7 +46,7 @@ type entry interface {
// channelMap is the storage data structure for channelz.
//
-// Methods of channelMap can be divided in two two categories with respect to
+// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
if maxResults <= 0 {
maxResults = EntriesPerPage
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 03e24e1507aa6..078bb81238bc4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -33,7 +33,7 @@ var (
// outside this package except by tests.
IDGen IDGenerator
- db *channelMap = newChannelMap()
+ db = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a5186..0e6e18e185c7a 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index d906487139445..452985f8d8f1b 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -45,7 +45,11 @@ var (
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
- EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+ // XDSFallbackSupport is the env variable that controls whether support for
+ // xDS fallback is turned on. If this is unset or is false, only the first
+ // xDS server in the list of server configs will be used.
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e1731c8..7617be2158957 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
package internal
var (
- // WithRecvBufferPool is implemented by the grpc package and returns a dial
+ // WithBufferPool is implemented by the grpc package and returns a dial
// option to configure a shared buffer pool for a grpc.ClientConn.
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
- // RecvBufferPool is implemented by the grpc package and returns a server
+ // BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
index 6717b757f80dc..43423d8ad9ab1 100644
--- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
@@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool {
name = strings.TrimSpace(name)
return name == "Google" || name == "Google Compute Engine"
case "windows":
- name = strings.Replace(name, " ", "", -1)
- name = strings.Replace(name, "\n", "", -1)
- name = strings.Replace(name, "\r", "", -1)
+ name = strings.ReplaceAll(name, " ", "")
+ name = strings.ReplaceAll(name, "\n", "")
+ name = strings.ReplaceAll(name, "\r", "")
return name == "Google"
default:
return false
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
index faa998de7632b..092ad187a2c8d 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -16,17 +16,21 @@
*
*/
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
+
+ "google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
- logger DepthLoggerV2
+ logger grpclog.DepthLoggerV2
prefix string
}
@@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
- InfoDepth(1, fmt.Sprintf(format, args...))
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
- WarningDepth(1, fmt.Sprintf(format, args...))
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
- ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...any) {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- if !Logger.V(2) {
- return
- }
- if pl != nil {
- // Handle nil, so the tests can pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- InfoDepth(1, fmt.Sprintf(format, args...))
-
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- return Logger.V(l)
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16acee5..19b9d639275a8 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedules the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
}
func (cs *CallbackSerializer) run(ctx context.Context) {
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1ab0cd..6d8c2f518dff7 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 5d66539869232..7aae9240ffc07 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -183,7 +183,7 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
EnterIdleModeForTesting any // func(*grpc.ClientConn)
@@ -203,11 +203,31 @@ var (
// UserSetDefaultScheme is set to true if the user has overridden the
// default resolver scheme.
- UserSetDefaultScheme bool = false
+ UserSetDefaultScheme = false
// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
// is the number of elements. swap swaps the elements with indexes i and j.
ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)
// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
index 3244718625cbc..703091047b4b8 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/lookup/v1/rls.proto
package grpc_lookup_v1
@@ -313,7 +313,7 @@ func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte {
var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{
+var file_grpc_lookup_v1_rls_proto_goTypes = []any{
(RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason
(*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest
(*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse
@@ -340,7 +340,7 @@ func file_grpc_lookup_v1_rls_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*RouteLookupRequest); i {
case 0:
return &v.state
@@ -352,7 +352,7 @@ func file_grpc_lookup_v1_rls_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*RouteLookupResponse); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
index c42cb8cba0c2c..a0be3c8cb268e 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/lookup/v1/rls_config.proto
package grpc_lookup_v1
@@ -271,6 +271,8 @@ type HttpKeyBuilder struct {
// for example if you are suppressing a lot of information from the URL, but
// need to separately cache and request URLs with that content.
ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // If specified, the HTTP method/verb will be extracted under this key name.
+ Method string `protobuf:"bytes,6,opt,name=method,proto3" json:"method,omitempty"`
}
func (x *HttpKeyBuilder) Reset() {
@@ -340,6 +342,13 @@ func (x *HttpKeyBuilder) GetConstantKeys() map[string]string {
return nil
}
+func (x *HttpKeyBuilder) GetMethod() string {
+ if x != nil {
+ return x.Method
+ }
+ return ""
+}
+
type RouteLookupConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -707,7 +716,7 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x22, 0xf1, 0x02, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75,
+ 0x38, 0x01, 0x22, 0x89, 0x03, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75,
0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61,
0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f,
0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61,
@@ -726,58 +735,60 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{
0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75,
0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65,
0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65,
- 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10,
- 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f,
- 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42,
- 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62,
- 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f,
- 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e,
- 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65,
- 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65,
- 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61,
- 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36,
- 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x3f, 0x0a,
+ 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6,
+ 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79,
+ 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e,
+ 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f,
+ 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12,
+ 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b,
+ 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b,
+ 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74,
- 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73,
- 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64,
- 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a,
- 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63,
- 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22,
- 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51,
- 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75,
- 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11,
- 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63,
- 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06,
+ 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f,
+ 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28,
+ 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a,
+ 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
+ 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73,
+ 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65,
+ 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65,
+ 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f,
+ 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e,
+ 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+ 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f,
+ 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -793,7 +804,7 @@ func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte {
}
var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{
+var file_grpc_lookup_v1_rls_config_proto_goTypes = []any{
(*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher
(*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder
(*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder
@@ -832,7 +843,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*NameMatcher); i {
case 0:
return &v.state
@@ -844,7 +855,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*GrpcKeyBuilder); i {
case 0:
return &v.state
@@ -856,7 +867,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*HttpKeyBuilder); i {
case 0:
return &v.state
@@ -868,7 +879,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*RouteLookupConfig); i {
case 0:
return &v.state
@@ -880,7 +891,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*RouteLookupClusterSpecifier); i {
case 0:
return &v.state
@@ -892,7 +903,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*GrpcKeyBuilder_Name); i {
case 0:
return &v.state
@@ -904,7 +915,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
return nil
}
}
- file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*GrpcKeyBuilder_ExtraKeys); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go
index 5c7a25efd8409..23dcb2100c3dd 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/lookup/v1/rls.proto
package grpc_lookup_v1
@@ -64,21 +64,25 @@ func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLoo
// RouteLookupServiceServer is the server API for RouteLookupService service.
// All implementations must embed UnimplementedRouteLookupServiceServer
-// for forward compatibility
+// for forward compatibility.
type RouteLookupServiceServer interface {
// Lookup returns a target for a single key.
RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error)
mustEmbedUnimplementedRouteLookupServiceServer()
}
-// UnimplementedRouteLookupServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedRouteLookupServiceServer struct {
-}
+// UnimplementedRouteLookupServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedRouteLookupServiceServer struct{}
func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented")
}
func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {}
+func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {}
// UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to RouteLookupServiceServer will
@@ -88,6 +92,13 @@ type UnsafeRouteLookupServiceServer interface {
}
func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) {
+ // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&RouteLookupService_ServiceDesc, srv)
}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572ad55..b901c7bace506 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000000..be110d41f9a42
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It eats any record calls where the label values provided do not match the
+// number of label keys.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If no stats handlers provided implement the MetricsRecorder interface,
+// the MetricsRecorder list returned is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc82059525..757925381fe75 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
- any, err := anypb.New(protoadapt.MessageV2Of(detail))
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd75bdb..54c24c2ff3865 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
index 078137b7fd705..7e7aaa5463683 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
index fd7d43a8907ba..d5c1085eeaecd 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 3deadfb4a20c9..ef72fbb3a0163 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
- d []byte
+ reader mem.Reader
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings cleanupStreams etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case, this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
- var wakeUp bool
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
if !f() { // f wasn't successful
- c.mu.Unlock()
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- c.trfChan.Store(make(chan struct{}))
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(chan struct{})
- close(ch)
- c.trfChan.Store((chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return false, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ _ = v.reader.Close()
}
}
+
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
- ch, _ := c.trfChan.Load().(chan struct{})
+ ch := c.trfChan.Swap(nil)
if ch != nil {
- close(ch)
+ close(*ch)
}
- c.trfChan.Store((chan struct{})(nil))
- c.mu.Unlock()
}
type side int
@@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -490,12 +511,13 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
conn: conn,
logger: logger,
ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ _ = df.reader.Close()
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possible HTTP2 frame size.
+ // Every dataFrame has two buffers; h that keeps grpc-message header and data
+ // that is the actual message. As an optimization to keep wire traffic low, data
+ // from data is copied to h to make as big as the maximum possible HTTP2 frame
+ // size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
- var (
- buf []byte
- )
+
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, len(dataItem.d))
- if hSize != 0 {
- if dSize == 0 {
- buf = dataItem.h
- } else {
- // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage
- var localBuf [http2MaxFrameLen]byte
- copy(localBuf[:hSize], dataItem.h)
- copy(localBuf[hSize:], dataItem.d[:dSize])
- buf = localBuf[:hSize+dSize]
- }
+ dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+ remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
} else {
- buf = dataItem.d
- }
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
- size := hSize + dSize
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = dataItem.reader.Read((*buf)[hSize:])
+ }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
- dataItem.d = dataItem.d[dSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 4a3ddce29a4e7..ce878693bd741 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@@ -171,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
@@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
- s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers to Write
+ // expect.
+ data.Ref()
headersWritten := s.updateHeaderSent()
- return ht.do(func() {
+ err := ht.do(func() {
+ defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
- ht.rw.Write(data)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
ht.rw.(http.Flusher).Flush()
})
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
}
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
@@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
@@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain(debugData string) {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 3c63c706986da..c769deab53c77 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -47,6 +47,7 @@ import (
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -59,6 +60,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
@@ -144,7 +147,7 @@ type http2Client struct {
onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
@@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
- // The following defer and goroutine monitor the connectCtx for cancelation
+ // The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
onClose: onClose,
}
var czSecurity credentials.ChannelzSecurityValue
@@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) {
t.kpDormancyCond.Signal()
}
t.mu.Unlock()
+
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
- // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits for loopyWriter to be closed with a timer to avoid the
+ // long blocking in case the connection is blackholed, i.e. TCP is
+ // just stuck.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
- <-t.writerDone
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+ }
t.cancel()
t.conn.Close()
channelz.RemoveEntry(t.channelz.ID)
@@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+ reader := data.Reader()
+
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
+ _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
+ _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- d: data,
+ reader: reader,
}
- if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return err
}
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
func (t *http2Client) getStream(f http2.Frame) *Stream {
@@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
return
}
// A client can receive multiple GoAways from the server (see
@@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
- return
}
+ // Transport error.
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
@@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index b7091165b5013..584b50fe55302 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -39,6 +39,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
@@ -119,7 +120,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelz *channelz.Socket
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
@@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- bufferPool: newBufferPool(),
+ bufferPool: config.BufferPool,
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
@@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ reader := data.Reader()
+
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
+ _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
+ _ = reader.Close()
return t.streamContextErr(s)
}
}
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ reader: reader,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 39cef3bd442eb..3613d7b64817d 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- n, err = w.conn.Write(b)
+ n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.flushKeepBuffer()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
func (w *bufWriter) Flush() error {
@@ -389,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
-var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 24fa1032574cb..54b2244365444 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
-
- return &bufConn{Conn: conn, r: r}, nil
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
+ }
+ return conn, nil
}
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 4b39c0ade97c0..924ba4f365338 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -37,6 +36,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -47,32 +47,10 @@ import (
const logLevel = 2
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() any {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
-
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -148,45 +129,97 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readHeaderClient(header)
+ } else {
+ n, r.err = r.readHeader(header)
+ }
+ return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there no additional data available in
+// recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
r.last = nil
}
- return copied, nil
+ return buf, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ buf, r.err = r.readClient(n)
} else {
- n, r.err = r.read(p)
+ buf, r.err = r.read(n)
}
- return n, r.err
+ return buf, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
+ }
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like a unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readHeaderAdditional(m, header)
+ case m := <-r.recv.get():
+ return r.readHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
+ }
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
}
- return copied, nil
+
+ return m.buffer, nil
}
type streamState uint32
@@ -241,7 +289,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
+ ct ClientTransport // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@@ -251,7 +299,7 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
@@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}
-// Trailer returns the cached trailer metedata. Note that if it is not called
+// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read
@@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+func (s *Stream) ReadHeader(header []byte) (err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return er
+ }
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return nil, er
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
+ }
+ return data, nil
}
-// tranportReader reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
}
t.windowHandler(n)
- return
+ return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
+ }
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// BytesReceived indicates whether any bytes have been received on this stream.
@@ -574,6 +673,7 @@ type ServerConfig struct {
ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
+ BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -612,6 +712,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
UseProxy bool
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
}
// NewClientTransport establishes the transport with the required ConnectOptions
@@ -673,7 +775,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -725,7 +827,7 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
@@ -798,7 +900,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
+ // errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go
index b8b92a6cb550e..8317859e1e95f 100644
--- a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go
+++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go
@@ -24,35 +24,29 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "maps"
"net/url"
"os"
+ "slices"
"strings"
+ "sync"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/xds/bootstrap"
- "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/structpb"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)
const (
- // The "server_features" field in the bootstrap file contains a list of
- // features supported by the server:
- // - A value of "xds_v3" indicates that the server supports the v3 version of
- // the xDS transport protocol.
- // - A value of "ignore_resource_deletion" indicates that the client should
- // ignore deletion of Listener and Cluster resources in updates from the
- // server.
- serverFeaturesV3 = "xds_v3"
serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion"
-
- gRPCUserAgentName = "gRPC Go"
- clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
- clientFeatureResourceWrapper = "xds.config.resource-in-sotw"
+ gRPCUserAgentName = "gRPC Go"
+ clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
+ clientFeatureResourceWrapper = "xds.config.resource-in-sotw"
)
// For overriding in unit tests.
@@ -60,12 +54,15 @@ var bootstrapFileReadFunc = os.ReadFile
// ChannelCreds contains the credentials to be used while communicating with an
// xDS server. It is also used to dedup servers with the same server URI.
+//
+// This type does not implement custom JSON marshal/unmarshal logic because it
+// is straightforward to accomplish the same with json struct tags.
type ChannelCreds struct {
// Type contains a unique name identifying the credentials type. The only
// supported types currently are "google_default" and "insecure".
- Type string
+ Type string `json:"type,omitempty"`
// Config contains the JSON configuration associated with the credentials.
- Config json.RawMessage
+ Config json.RawMessage `json:"config,omitempty"`
}
// Equal reports whether cc and other are considered equal.
@@ -87,92 +84,208 @@ func (cc ChannelCreds) String() string {
return cc.Type + "-" + string(b)
}
-// ServerConfig contains the configuration to connect to a server, including
-// URI, creds, and transport API version (e.g. v2 or v3).
+// ServerConfigs represents a collection of server configurations.
+type ServerConfigs []*ServerConfig
+
+// Equal returns true if scs equals other.
+func (scs *ServerConfigs) Equal(other *ServerConfigs) bool {
+ if len(*scs) != len(*other) {
+ return false
+ }
+ for i := range *scs {
+ if !(*scs)[i].Equal((*other)[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalJSON takes the json data (a list of server configurations) and
+// unmarshals it to the struct.
+func (scs *ServerConfigs) UnmarshalJSON(data []byte) error {
+ servers := []*ServerConfig{}
+ if err := json.Unmarshal(data, &servers); err != nil {
+ return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data))
+ }
+ // Only use the first server config if fallback support is disabled.
+ if !envconfig.XDSFallbackSupport {
+ if len(servers) > 1 {
+ servers = servers[:1]
+ }
+ }
+ *scs = servers
+ return nil
+}
+
+// String returns a string representation of the ServerConfigs, by concatenating
+// the string representations of the underlying server configs.
+func (scs *ServerConfigs) String() string {
+ ret := ""
+ for i, sc := range *scs {
+ if i > 0 {
+ ret += ", "
+ }
+ ret += sc.String()
+ }
+ return ret
+}
+
+// Authority contains configuration for an xDS control plane authority.
//
-// It contains unexported fields that are initialized when unmarshaled from JSON
-// using either the UnmarshalJSON() method or the ServerConfigFromJSON()
-// function. Hence users are strongly encouraged not to use a literal struct
-// initialization to create an instance of this type, but instead unmarshal from
-// JSON using one of the two available options.
-type ServerConfig struct {
- // ServerURI is the management server to connect to.
+// This type does not implement custom JSON marshal/unmarshal logic because it
+// is straightforward to accomplish the same with json struct tags.
+type Authority struct {
+ // ClientListenerResourceNameTemplate is template for the name of the
+ // Listener resource to subscribe to for a gRPC client channel. Used only
+ // when the channel is created using an "xds:" URI with this authority name.
//
- // The bootstrap file contains an ordered list of xDS servers to contact for
- // this authority. The first one is picked.
- ServerURI string
- // Creds contains the credentials to be used while communicationg with this
- // xDS server. It is also used to dedup servers with the same server URI.
- Creds ChannelCreds
- // ServerFeatures contains a list of features supported by this xDS server.
- // It is also used to dedup servers with the same server URI and creds.
- ServerFeatures []string
+ // The token "%s", if present in this string, will be replaced
+ // with %-encoded service authority (i.e., the path part of the target
+ // URI used to create the gRPC channel).
+ //
+ // Must start with "xdstp:///". If it does not,
+ // that is considered a bootstrap file parsing error.
+ //
+ // If not present in the bootstrap file, defaults to
+ // "xdstp:///envoy.config.listener.v3.Listener/%s".
+ ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"`
+ // XDSServers contains the list of server configurations for this authority.
+ XDSServers ServerConfigs `json:"xds_servers,omitempty"`
+}
+
+// Equal returns true if a equals other.
+func (a *Authority) Equal(other *Authority) bool {
+ switch {
+ case a == nil && other == nil:
+ return true
+ case (a != nil) != (other != nil):
+ return false
+ case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate:
+ return false
+ case !a.XDSServers.Equal(&other.XDSServers):
+ return false
+ }
+ return true
+}
+
+// ServerConfig contains the configuration to connect to a server.
+type ServerConfig struct {
+ serverURI string
+ channelCreds []ChannelCreds
+ serverFeatures []string
// As part of unmarshalling the JSON config into this struct, we ensure that
// the credentials config is valid by building an instance of the specified
- // credentials and store it here as a grpc.DialOption for easy access when
- // dialing this xDS server.
+ // credentials and store it here for easy access.
+ selectedCreds ChannelCreds
credsDialOption grpc.DialOption
- // IgnoreResourceDeletion controls the behavior of the xDS client when the
- // server deletes a previously sent Listener or Cluster resource. If set, the
- // xDS client will not invoke the watchers' OnResourceDoesNotExist() method
- // when a resource is deleted, nor will it remove the existing resource value
- // from its cache.
- IgnoreResourceDeletion bool
+ cleanups []func()
+}
- // Cleanups are called when the xDS client for this server is closed. Allows
- // cleaning up resources created specifically for this ServerConfig.
- Cleanups []func()
+// ServerURI returns the URI of the management server to connect to.
+func (sc *ServerConfig) ServerURI() string {
+ return sc.serverURI
}
-// CredsDialOption returns the configured credentials as a grpc dial option.
+// ChannelCreds returns the credentials configuration to use when communicating
+// with this server. Also used to dedup servers with the same server URI.
+func (sc *ServerConfig) ChannelCreds() []ChannelCreds {
+ return sc.channelCreds
+}
+
+// ServerFeatures returns the list of features supported by this server. Also
+// used to dedup servers with the same server URI and channel creds.
+func (sc *ServerConfig) ServerFeatures() []string {
+ return sc.serverFeatures
+}
+
+// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a
+// feature where the xDS client can ignore resource deletions from this server,
+// as described in gRFC A53.
+//
+// This feature controls the behavior of the xDS client when the server deletes
+// a previously sent Listener or Cluster resource. If set, the xDS client will
+// not invoke the watchers' OnResourceDoesNotExist() method when a resource is
+// deleted, nor will it remove the existing resource value from its cache.
+func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool {
+ for _, sf := range sc.serverFeatures {
+ if sf == serverFeaturesIgnoreResourceDeletion {
+ return true
+ }
+ }
+ return false
+}
+
+// CredsDialOption returns the first supported transport credentials from the
+// configuration, as a dial option.
func (sc *ServerConfig) CredsDialOption() grpc.DialOption {
return sc.credsDialOption
}
+// Cleanups returns a collection of functions to be called when the xDS client
+// for this server is closed. Allows cleaning up resources created specifically
+// for this server.
+func (sc *ServerConfig) Cleanups() []func() {
+ return sc.cleanups
+}
+
+// Equal reports whether sc and other are considered equal.
+func (sc *ServerConfig) Equal(other *ServerConfig) bool {
+ switch {
+ case sc == nil && other == nil:
+ return true
+ case (sc != nil) != (other != nil):
+ return false
+ case sc.serverURI != other.serverURI:
+ return false
+ case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }):
+ return false
+ case !slices.Equal(sc.serverFeatures, other.serverFeatures):
+ return false
+ case !sc.selectedCreds.Equal(other.selectedCreds):
+ return false
+ }
+ return true
+}
+
// String returns the string representation of the ServerConfig.
-//
-// This string representation will be used as map keys in federation
-// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be
-// shared by authorities with different names but the same server config.
-//
-// It covers (almost) all the fields so the string can represent the config
-// content. It doesn't cover NodeProto because NodeProto isn't used by
-// federation.
func (sc *ServerConfig) String() string {
- features := strings.Join(sc.ServerFeatures, "-")
- return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-")
+ if len(sc.serverFeatures) == 0 {
+ return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String())
+ }
+ features := strings.Join(sc.serverFeatures, "-")
+ return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-")
}
-// MarshalJSON marshals the ServerConfig to json.
-func (sc ServerConfig) MarshalJSON() ([]byte, error) {
- server := xdsServer{
- ServerURI: sc.ServerURI,
- ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}},
- ServerFeatures: sc.ServerFeatures,
- }
- server.ServerFeatures = []string{serverFeaturesV3}
- if sc.IgnoreResourceDeletion {
- server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion)
+// The following fields correspond 1:1 with the JSON schema for ServerConfig.
+type serverConfigJSON struct {
+ ServerURI string `json:"server_uri,omitempty"`
+ ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"`
+ ServerFeatures []string `json:"server_features,omitempty"`
+}
+
+// MarshalJSON returns marshaled JSON bytes corresponding to this server config.
+func (sc *ServerConfig) MarshalJSON() ([]byte, error) {
+ server := &serverConfigJSON{
+ ServerURI: sc.serverURI,
+ ChannelCreds: sc.channelCreds,
+ ServerFeatures: sc.serverFeatures,
}
return json.Marshal(server)
}
// UnmarshalJSON takes the json data (a server) and unmarshals it to the struct.
func (sc *ServerConfig) UnmarshalJSON(data []byte) error {
- var server xdsServer
+ server := serverConfigJSON{}
if err := json.Unmarshal(data, &server); err != nil {
- return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err)
+ return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data))
}
- sc.ServerURI = server.ServerURI
- sc.ServerFeatures = server.ServerFeatures
- for _, f := range server.ServerFeatures {
- if f == serverFeaturesIgnoreResourceDeletion {
- sc.IgnoreResourceDeletion = true
- }
- }
+ sc.serverURI = server.ServerURI
+ sc.channelCreds = server.ChannelCreds
+ sc.serverFeatures = server.ServerFeatures
+
for _, cc := range server.ChannelCreds {
// We stop at the first credential type that we support.
c := bootstrap.GetCredentials(cc.Type)
@@ -183,351 +296,533 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error {
if err != nil {
return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err)
}
- sc.Creds = ChannelCreds(cc)
+ sc.selectedCreds = cc
sc.credsDialOption = grpc.WithCredentialsBundle(bundle)
- sc.Cleanups = append(sc.Cleanups, cancel)
+ sc.cleanups = append(sc.cleanups, cancel)
break
}
+ if sc.serverURI == "" {
+ return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data))
+ }
+ if sc.credsDialOption == nil {
+ return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data))
+ }
return nil
}
-// ServerConfigFromJSON creates a new ServerConfig from the given JSON
-// configuration. This is the preferred way of creating a ServerConfig when
-// hand-crafting the JSON configuration.
-func ServerConfigFromJSON(data []byte) (*ServerConfig, error) {
+// ServerConfigTestingOptions specifies options for creating a new ServerConfig
+// for testing purposes.
+//
+// # Testing-Only
+type ServerConfigTestingOptions struct {
+ // URI is the name of the server corresponding to this server config.
+ URI string
+ // ChannelCreds contains a list of channel credentials to use when talking
+ // to this server. If unspecified, `insecure` credentials will be used.
+ ChannelCreds []ChannelCreds
+ // ServerFeatures represents the list of features supported by this server.
+ ServerFeatures []string
+}
+
+// ServerConfigForTesting creates a new ServerConfig from the passed in options,
+// for testing purposes.
+//
+// # Testing-Only
+func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) {
+ cc := opts.ChannelCreds
+ if cc == nil {
+ cc = []ChannelCreds{{Type: "insecure"}}
+ }
+ scInternal := &serverConfigJSON{
+ ServerURI: opts.URI,
+ ChannelCreds: cc,
+ ServerFeatures: opts.ServerFeatures,
+ }
+ scJSON, err := json.Marshal(scInternal)
+ if err != nil {
+ return nil, err
+ }
+
sc := new(ServerConfig)
- if err := sc.UnmarshalJSON(data); err != nil {
+ if err := sc.UnmarshalJSON(scJSON); err != nil {
return nil, err
}
return sc, nil
}
-// Equal reports whether sc and other are considered equal.
-func (sc *ServerConfig) Equal(other *ServerConfig) bool {
+// Config is the internal representation of the bootstrap configuration provided
+// to the xDS client.
+type Config struct {
+ xDSServers ServerConfigs
+ cpcs map[string]certproviderNameAndConfig
+ serverListenerResourceNameTemplate string
+ clientDefaultListenerResourceNameTemplate string
+ authorities map[string]*Authority
+ node node
+
+ // A map from certprovider instance names to parsed buildable configs.
+ certProviderConfigs map[string]*certprovider.BuildableConfig
+}
+
+// XDSServers returns the top-level list of management servers to connect to,
+// ordered by priority.
+func (c *Config) XDSServers() ServerConfigs {
+ return c.xDSServers
+}
+
+// CertProviderConfigs returns a map from certificate provider plugin instance
+// name to their configuration. Callers must not modify the returned map.
+func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig {
+ return c.certProviderConfigs
+}
+
+// ServerListenerResourceNameTemplate returns template for the name of the
+// Listener resource to subscribe to for a gRPC server.
+//
+// If starts with "xdstp:", will be interpreted as a new-style name,
+// in which case the authority of the URI will be used to select the
+// relevant configuration in the "authorities" map.
+//
+// The token "%s", if present in this string, will be replaced with the IP
+// and port on which the server is listening. (e.g., "0.0.0.0:8080",
+// "[::]:8080"). For example, a value of "example/resource/%s" could become
+// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:",
+// the replaced string will be %-encoded.
+//
+// There is no default; if unset, xDS-based server creation fails.
+func (c *Config) ServerListenerResourceNameTemplate() string {
+ return c.serverListenerResourceNameTemplate
+}
+
+// ClientDefaultListenerResourceNameTemplate returns a template for the name of
+// the Listener resource to subscribe to for a gRPC client channel. Used only
+// when the channel is created with an "xds:" URI with no authority.
+//
+// If starts with "xdstp:", will be interpreted as a new-style name,
+// in which case the authority of the URI will be used to select the
+// relevant configuration in the "authorities" map.
+//
+// The token "%s", if present in this string, will be replaced with
+// the service authority (i.e., the path part of the target URI
+// used to create the gRPC channel). If the template starts with
+// "xdstp:", the replaced string will be %-encoded.
+//
+// Defaults to "%s".
+func (c *Config) ClientDefaultListenerResourceNameTemplate() string {
+ return c.clientDefaultListenerResourceNameTemplate
+}
+
+// Authorities returns a map of authority name to corresponding configuration.
+// Callers must not modify the returned map.
+//
+// This is used in the following cases:
+// - A gRPC client channel is created using an "xds:" URI that includes
+// an authority.
+// - A gRPC client channel is created using an "xds:" URI with no
+// authority, but the "client_default_listener_resource_name_template"
+// field above turns it into an "xdstp:" URI.
+// - A gRPC server is created and the
+// "server_listener_resource_name_template" field is an "xdstp:" URI.
+//
+// In any of those cases, it is an error if the specified authority is
+// not present in this map.
+func (c *Config) Authorities() map[string]*Authority {
+ return c.authorities
+}
+
+// Node returns xDS a v3 Node proto corresponding to the node field in the
+// bootstrap configuration, which identifies a specific gRPC instance.
+func (c *Config) Node() *v3corepb.Node {
+ return c.node.toProto()
+}
+
+// Equal returns true if c equals other.
+func (c *Config) Equal(other *Config) bool {
switch {
- case sc == nil && other == nil:
+ case c == nil && other == nil:
return true
- case (sc != nil) != (other != nil):
+ case (c != nil) != (other != nil):
+ return false
+ case !c.xDSServers.Equal(&other.xDSServers):
+ return false
+ case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }):
+ return false
+ case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate:
return false
- case sc.ServerURI != other.ServerURI:
+ case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate:
return false
- case !sc.Creds.Equal(other.Creds):
+ case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }):
return false
- case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures):
+ case !c.node.Equal(other.node):
return false
}
return true
}
-func equalStringSlice(a, b []string) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if a[i] != b[i] {
- return false
- }
- }
- return true
+// String returns a string representation of the Config.
+func (c *Config) String() string {
+ s, _ := c.MarshalJSON()
+ return string(s)
}
-// unmarshalJSONServerConfigSlice unmarshals JSON to a slice.
-func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) {
- var servers []*ServerConfig
- if err := json.Unmarshal(data, &servers); err != nil {
- return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err)
- }
- if len(servers) < 1 {
- return nil, fmt.Errorf("no management server found in JSON")
- }
- return servers, nil
+// The following fields correspond 1:1 with the JSON schema for Config.
+type configJSON struct {
+ XDSServers ServerConfigs `json:"xds_servers,omitempty"`
+ CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"`
+ ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"`
+ ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"`
+ Authorities map[string]*Authority `json:"authorities,omitempty"`
+ Node node `json:"node,omitempty"`
}
-// Authority contains configuration for an Authority for an xDS control plane
-// server. See the Authorities field in the Config struct for how it's used.
-type Authority struct {
- // ClientListenerResourceNameTemplate is template for the name of the
- // Listener resource to subscribe to for a gRPC client channel. Used only
- // when the channel is created using an "xds:" URI with this authority name.
- //
- // The token "%s", if present in this string, will be replaced
- // with %-encoded service authority (i.e., the path part of the target
- // URI used to create the gRPC channel).
- //
- // Must start with "xdstp:///". If it does not,
- // that is considered a bootstrap file parsing error.
- //
- // If not present in the bootstrap file, defaults to
- // "xdstp:///envoy.config.listener.v3.Listener/%s".
- ClientListenerResourceNameTemplate string
- // XDSServer contains the management server and config to connect to for
- // this authority.
- XDSServer *ServerConfig
+// MarshalJSON returns marshaled JSON bytes corresponding to this config.
+func (c *Config) MarshalJSON() ([]byte, error) {
+ config := &configJSON{
+ XDSServers: c.xDSServers,
+ CertificateProviders: c.cpcs,
+ ServerListenerResourceNameTemplate: c.serverListenerResourceNameTemplate,
+ ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate,
+ Authorities: c.authorities,
+ Node: c.node,
+ }
+ return json.MarshalIndent(config, " ", " ")
}
-// UnmarshalJSON implement json unmarshaller.
-func (a *Authority) UnmarshalJSON(data []byte) error {
- var jsonData map[string]json.RawMessage
- if err := json.Unmarshal(data, &jsonData); err != nil {
- return fmt.Errorf("xds: failed to parse authority: %v", err)
+// UnmarshalJSON takes the json data (the complete bootstrap configuration) and
+// unmarshals it to the struct.
+func (c *Config) UnmarshalJSON(data []byte) error {
+ // Initialize the node field with client controlled values. This ensures
+ // even if the bootstrap configuration did not contain the node field, we
+ // will have a node field with client controlled fields alone.
+ config := configJSON{Node: newNode()}
+ if err := json.Unmarshal(data, &config); err != nil {
+ return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err)
}
- for k, v := range jsonData {
- switch k {
- case "xds_servers":
- servers, err := unmarshalJSONServerConfigSlice(v)
- if err != nil {
- return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err)
- }
- a.XDSServer = servers[0]
- case "client_listener_resource_name_template":
- if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil {
- return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
+ c.xDSServers = config.XDSServers
+ c.cpcs = config.CertificateProviders
+ c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate
+ c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate
+ c.authorities = config.Authorities
+ c.node = config.Node
+
+ // Build the certificate providers configuration to ensure that it is valid.
+ cpcCfgs := make(map[string]*certprovider.BuildableConfig)
+ getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
+ for instance, nameAndConfig := range c.cpcs {
+ name := nameAndConfig.PluginName
+ parser := getBuilder(nameAndConfig.PluginName)
+ if parser == nil {
+ // We ignore plugins that we do not know about.
+ continue
}
+ bc, err := parser.ParseConfig(nameAndConfig.Config)
+ if err != nil {
+ return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err)
+ }
+ cpcCfgs[instance] = bc
}
- return nil
-}
+ c.certProviderConfigs = cpcCfgs
-// Config provides the xDS client with several key bits of information that it
-// requires in its interaction with the management server. The Config is
-// initialized from the bootstrap file.
-//
-// Users must use one of the NewConfigXxx() functions to create a Config
-// instance, and not initialize it manually.
-type Config struct {
- // XDSServer is the management server to connect to.
- //
- // The bootstrap file contains a list of servers (with name+creds), but we
- // pick the first one.
- XDSServer *ServerConfig
- // CertProviderConfigs contains a mapping from certificate provider plugin
- // instance names to parsed buildable configs.
- CertProviderConfigs map[string]*certprovider.BuildableConfig
- // ServerListenerResourceNameTemplate is a template for the name of the
- // Listener resource to subscribe to for a gRPC server.
- //
- // If starts with "xdstp:", will be interpreted as a new-style name,
- // in which case the authority of the URI will be used to select the
- // relevant configuration in the "authorities" map.
- //
- // The token "%s", if present in this string, will be replaced with the IP
- // and port on which the server is listening. (e.g., "0.0.0.0:8080",
- // "[::]:8080"). For example, a value of "example/resource/%s" could become
- // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:",
- // the replaced string will be %-encoded.
- //
- // There is no default; if unset, xDS-based server creation fails.
- ServerListenerResourceNameTemplate string
- // A template for the name of the Listener resource to subscribe to
- // for a gRPC client channel. Used only when the channel is created
- // with an "xds:" URI with no authority.
- //
- // If starts with "xdstp:", will be interpreted as a new-style name,
- // in which case the authority of the URI will be used to select the
- // relevant configuration in the "authorities" map.
- //
- // The token "%s", if present in this string, will be replaced with
- // the service authority (i.e., the path part of the target URI
- // used to create the gRPC channel). If the template starts with
- // "xdstp:", the replaced string will be %-encoded.
- //
- // Defaults to "%s".
- ClientDefaultListenerResourceNameTemplate string
- // Authorities is a map of authority name to corresponding configuration.
- //
- // This is used in the following cases:
- // - A gRPC client channel is created using an "xds:" URI that includes
- // an authority.
- // - A gRPC client channel is created using an "xds:" URI with no
- // authority, but the "client_default_listener_resource_name_template"
- // field above turns it into an "xdstp:" URI.
- // - A gRPC server is created and the
- // "server_listener_resource_name_template" field is an "xdstp:" URI.
- //
- // In any of those cases, it is an error if the specified authority is
- // not present in this map.
- Authorities map[string]*Authority
- // NodeProto contains the Node proto to be used in xDS requests. This will be
- // of type *v3corepb.Node.
- NodeProto *v3corepb.Node
-}
-
-type channelCreds struct {
- Type string `json:"type"`
- Config json.RawMessage `json:"config,omitempty"`
-}
+ // Default value of the default client listener name template is "%s".
+ if c.clientDefaultListenerResourceNameTemplate == "" {
+ c.clientDefaultListenerResourceNameTemplate = "%s"
+ }
+ if len(c.xDSServers) == 0 {
+ return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data))
+ }
-type xdsServer struct {
- ServerURI string `json:"server_uri"`
- ChannelCreds []channelCreds `json:"channel_creds"`
- ServerFeatures []string `json:"server_features"`
+ // Post-process the authorities' client listener resource template field:
+ // - if set, it must start with "xdstp:///"
+ // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s"
+ for name, authority := range c.authorities {
+ prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
+ if authority.ClientListenerResourceNameTemplate == "" {
+ authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
+ continue
+ }
+ if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
+ return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
+ }
+ }
+ return nil
}
-func bootstrapConfigFromEnvVariable() ([]byte, error) {
+// GetConfiguration returns the bootstrap configuration initialized by reading
+// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents
+// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the
+// former is preferred.
+//
+// If none of the env vars are set, this function returns the fallback
+// configuration if it is not nil. Else, it returns an error.
+//
+// This function tries to process as much of the bootstrap file as possible (in
+// the presence of the errors) and may return a Config object with certain
+// fields left unspecified, in which case the caller should use some sane
+// defaults.
+func GetConfiguration() (*Config, error) {
fName := envconfig.XDSBootstrapFileName
fContent := envconfig.XDSBootstrapFileContent
- // Bootstrap file name has higher priority than bootstrap content.
if fName != "" {
- // If file name is set
- // - If file not found (or other errors), fail
- // - Otherwise, use the content.
- //
- // Note that even if the content is invalid, we don't failover to the
- // file content env variable.
- logger.Debugf("Using bootstrap file with name %q", fName)
- return bootstrapFileReadFunc(fName)
+ if logger.V(2) {
+ logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName)
+ }
+ cfg, err := bootstrapFileReadFunc(fName)
+ if err != nil {
+ return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err)
+ }
+ return newConfigFromContents(cfg)
}
if fContent != "" {
- return []byte(fContent), nil
+ if logger.V(2) {
+ logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable")
+ }
+ return newConfigFromContents([]byte(fContent))
+ }
+
+ if cfg := fallbackBootstrapConfig(); cfg != nil {
+ if logger.V(2) {
+ logger.Infof("Using bootstrap contents from fallback config")
+ }
+ return cfg, nil
}
- return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined",
- envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv)
+ return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv)
+}
+
+func newConfigFromContents(data []byte) (*Config, error) {
+ // Normalize the input configuration.
+ buf := bytes.Buffer{}
+ err := json.Indent(&buf, data, "", "")
+ if err != nil {
+ return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err)
+ }
+ data = bytes.TrimSpace(buf.Bytes())
+
+ config := &Config{}
+ if err := config.UnmarshalJSON(data); err != nil {
+ return nil, err
+ }
+ return config, nil
}
-// NewConfig returns a new instance of Config initialized by reading the
-// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified
-// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the former is
-// preferred.
+// ConfigOptionsForTesting specifies options for creating a new bootstrap
+// configuration for testing purposes.
//
-// We support a credential registration mechanism and only credentials
-// registered through that mechanism will be accepted here. See package
-// `xds/bootstrap` for details.
+// # Testing-Only
+type ConfigOptionsForTesting struct {
+ // Servers is the top-level xDS server configuration. It contains a list of
+ // server configurations.
+ Servers json.RawMessage
+ // CertificateProviders is the certificate providers configuration.
+ CertificateProviders map[string]json.RawMessage
+ // ServerListenerResourceNameTemplate is the listener resource name template
+ // to be used on the gRPC server.
+ ServerListenerResourceNameTemplate string
+ // ClientDefaultListenerResourceNameTemplate is the default listener
+ // resource name template to be used on the gRPC client.
+ ClientDefaultListenerResourceNameTemplate string
+ // Authorities is a list of non-default authorities.
+ Authorities map[string]json.RawMessage
+ // Node identifies the gRPC client/server node in the
+ // proxyless service mesh.
+ Node json.RawMessage
+}
+
+// NewContentsForTesting creates a new bootstrap configuration from the passed in
+// options, for testing purposes.
//
-// This function tries to process as much of the bootstrap file as possible (in
-// the presence of the errors) and may return a Config object with certain
-// fields left unspecified, in which case the caller should use some sane
-// defaults.
-func NewConfig() (*Config, error) {
- // Examples of the bootstrap json can be found in the generator tests
- // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go.
- data, err := bootstrapConfigFromEnvVariable()
+// # Testing-Only
+func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) {
+ var servers ServerConfigs
+ if err := json.Unmarshal(opts.Servers, &servers); err != nil {
+ return nil, err
+ }
+ certProviders := make(map[string]certproviderNameAndConfig)
+ for k, v := range opts.CertificateProviders {
+ cp := certproviderNameAndConfig{}
+ if err := json.Unmarshal(v, &cp); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v))
+ }
+ certProviders[k] = cp
+ }
+ authorities := make(map[string]*Authority)
+ for k, v := range opts.Authorities {
+ a := &Authority{}
+ if err := json.Unmarshal(v, a); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v))
+ }
+ authorities[k] = a
+ }
+ node := newNode()
+ if err := json.Unmarshal(opts.Node, &node); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err)
+ }
+ cfgJSON := configJSON{
+ XDSServers: servers,
+ CertificateProviders: certProviders,
+ ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate,
+ ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate,
+ Authorities: authorities,
+ Node: node,
+ }
+ contents, err := json.MarshalIndent(cfgJSON, " ", " ")
if err != nil {
- return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err)
+ return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err)
}
- return newConfigFromContents(data)
+ return contents, nil
}
-// NewConfigFromContents returns a new Config using the specified
-// bootstrap file contents instead of reading the environment variable.
-func NewConfigFromContents(data []byte) (*Config, error) {
- return newConfigFromContents(data)
+// NewConfigForTesting creates a new bootstrap configuration from the provided
+// contents, for testing purposes.
+//
+// # Testing-Only
+func NewConfigForTesting(contents []byte) (*Config, error) {
+ return newConfigFromContents(contents)
}
-func newConfigFromContents(data []byte) (*Config, error) {
- config := &Config{}
+// certproviderNameAndConfig is the internal representation of
+// the `certificate_providers` field in the bootstrap configuration.
+type certproviderNameAndConfig struct {
+ PluginName string `json:"plugin_name"`
+ Config json.RawMessage `json:"config"`
+}
- var jsonData map[string]json.RawMessage
- if err := json.Unmarshal(data, &jsonData); err != nil {
- return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err)
- }
+// locality is the internal representation of the locality field within node.
+type locality struct {
+ Region string `json:"region,omitempty"`
+ Zone string `json:"zone,omitempty"`
+ SubZone string `json:"sub_zone,omitempty"`
+}
- var node *v3corepb.Node
- opts := protojson.UnmarshalOptions{DiscardUnknown: true}
- for k, v := range jsonData {
- switch k {
- case "node":
- node = &v3corepb.Node{}
- if err := opts.Unmarshal(v, node); err != nil {
- return nil, fmt.Errorf("xds: protojson.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
- case "xds_servers":
- servers, err := unmarshalJSONServerConfigSlice(v)
- if err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err)
- }
- config.XDSServer = servers[0]
- case "certificate_providers":
- var providerInstances map[string]json.RawMessage
- if err := json.Unmarshal(v, &providerInstances); err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
- configs := make(map[string]*certprovider.BuildableConfig)
- getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
- for instance, data := range providerInstances {
- var nameAndConfig struct {
- PluginName string `json:"plugin_name"`
- Config json.RawMessage `json:"config"`
- }
- if err := json.Unmarshal(data, &nameAndConfig); err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err)
- }
-
- name := nameAndConfig.PluginName
- parser := getBuilder(nameAndConfig.PluginName)
- if parser == nil {
- // We ignore plugins that we do not know about.
- continue
- }
- bc, err := parser.ParseConfig(nameAndConfig.Config)
- if err != nil {
- return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err)
- }
- configs[instance] = bc
- }
- config.CertProviderConfigs = configs
- case "server_listener_resource_name_template":
- if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
- case "client_default_listener_resource_name_template":
- if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
- case "authorities":
- if err := json.Unmarshal(v, &config.Authorities); err != nil {
- return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
- }
- default:
- logger.Warningf("Bootstrap content has unknown field: %s", k)
- }
- // Do not fail the xDS bootstrap when an unknown field is seen. This can
- // happen when an older version client reads a newer version bootstrap
- // file with new fields.
- }
+func (l locality) Equal(other locality) bool {
+ return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone
+}
+
+func (l locality) isEmpty() bool {
+ return l.Equal(locality{})
+}
- if config.ClientDefaultListenerResourceNameTemplate == "" {
- // Default value of the default client listener name template is "%s".
- config.ClientDefaultListenerResourceNameTemplate = "%s"
+type userAgentVersion struct {
+ UserAgentVersion string `json:"user_agent_version,omitempty"`
+}
+
+// node is the internal representation of the node field in the bootstrap
+// configuration.
+type node struct {
+ ID string `json:"id,omitempty"`
+ Cluster string `json:"cluster,omitempty"`
+ Locality locality `json:"locality,omitempty"`
+ Metadata *structpb.Struct `json:"metadata,omitempty"`
+
+ // The following fields are controlled by the client implementation and
+ // should not be unmarshaled from JSON.
+ userAgentName string
+ userAgentVersionType userAgentVersion
+ clientFeatures []string
+}
+
+// newNode is a convenience function to create a new node instance with fields
+// controlled by the client implementation set to the desired values.
+func newNode() node {
+ return node{
+ userAgentName: gRPCUserAgentName,
+ userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version},
+ clientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper},
}
- if config.XDSServer == nil {
- return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"])
+}
+
+func (n node) Equal(other node) bool {
+ switch {
+ case n.ID != other.ID:
+ return false
+ case n.Cluster != other.Cluster:
+ return false
+ case !n.Locality.Equal(other.Locality):
+ return false
+ case n.userAgentName != other.userAgentName:
+ return false
+ case n.userAgentVersionType != other.userAgentVersionType:
+ return false
}
- if config.XDSServer.ServerURI == "" {
- return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
+
+ // Consider failures in JSON marshaling as being unable to perform the
+ // comparison, and hence return false.
+ nMetadata, err := n.Metadata.MarshalJSON()
+ if err != nil {
+ return false
}
- if config.XDSServer.CredsDialOption() == nil {
- return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
+ otherMetadata, err := other.Metadata.MarshalJSON()
+ if err != nil {
+ return false
}
- // Post-process the authorities' client listener resource template field:
- // - if set, it must start with "xdstp:///"
- // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s"
- for name, authority := range config.Authorities {
- prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
- if authority.ClientListenerResourceNameTemplate == "" {
- authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
- continue
- }
- if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
- return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
- }
+ if !bytes.Equal(nMetadata, otherMetadata) {
+ return false
}
- // Performing post-production on the node information. Some additional fields
- // which are not expected to be set in the bootstrap file are populated here.
- if node == nil {
- node = &v3corepb.Node{}
+ return slices.Equal(n.clientFeatures, other.clientFeatures)
+}
+
+func (n node) toProto() *v3corepb.Node {
+ return &v3corepb.Node{
+ Id: n.ID,
+ Cluster: n.Cluster,
+ Locality: func() *v3corepb.Locality {
+ if n.Locality.isEmpty() {
+ return nil
+ }
+ return &v3corepb.Locality{
+ Region: n.Locality.Region,
+ Zone: n.Locality.Zone,
+ SubZone: n.Locality.SubZone,
+ }
+ }(),
+ Metadata: proto.Clone(n.Metadata).(*structpb.Struct),
+ UserAgentName: n.userAgentName,
+ UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion},
+ ClientFeatures: slices.Clone(n.clientFeatures),
}
- node.UserAgentName = gRPCUserAgentName
- node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
- node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper)
- config.NodeProto = node
+}
- if logger.V(2) {
- logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config))
+// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be
+// used when the bootstrap environment variables are unset.
+//
+// The provided configuration must be valid JSON. Returns a non-nil error if
+// parsing the provided configuration fails.
+func SetFallbackBootstrapConfig(cfgJSON []byte) error {
+ config, err := newConfigFromContents(cfgJSON)
+ if err != nil {
+ return err
}
- return config, nil
+
+ configMu.Lock()
+ defer configMu.Unlock()
+ fallbackBootstrapCfg = config
+ return nil
+}
+
+// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap
+// configuration to be used when the bootstrap environment variables are unset.
+//
+// # Testing-Only
+func UnsetFallbackBootstrapConfigForTesting() {
+ configMu.Lock()
+ defer configMu.Unlock()
+ fallbackBootstrapCfg = nil
}
+
+// fallbackBootstrapConfig returns the fallback bootstrap configuration
+// that will be used by the xDS client when the bootstrap environment
+// variables are unset.
+func fallbackBootstrapConfig() *Config {
+ configMu.Lock()
+ defer configMu.Unlock()
+ return fallbackBootstrapCfg
+}
+
+var (
+ configMu sync.Mutex
+ fallbackBootstrapCfg *Config
+)
diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go
index 9b51fcc839721..ec1a30919ec9a 100644
--- a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go
+++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go
@@ -34,7 +34,7 @@ func PopulateResourceTemplate(template, target string) string {
if strings.HasPrefix(template, "xdstp:") {
target = percentEncode(target)
}
- return strings.Replace(template, "%s", target, -1)
+ return strings.ReplaceAll(template, "%s", target)
}
// percentEncode percent encode t, except for "/". See the tests for examples.
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go
index 713e39cf31cb9..fb599954a6c18 100644
--- a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go
+++ b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go
@@ -59,11 +59,11 @@ func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConf
}
func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) {
- any, err := config.UnmarshalNew()
+ c, err := config.UnmarshalNew()
if err != nil {
return nil, "", err
}
- switch m := any.(type) {
+ switch m := c.(type) {
case *v1xdsudpatypepb.TypedStruct:
return convertCustomConfig(m.TypeUrl, m.Value)
case *v3xdsxdstypepb.TypedStruct:
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go
index c9f71d32cbb28..e1c15018bde08 100644
--- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go
+++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go
@@ -244,7 +244,7 @@ func (am *andMatcher) match(data *rpcData) bool {
type alwaysMatcher struct {
}
-func (am *alwaysMatcher) match(data *rpcData) bool {
+func (am *alwaysMatcher) match(*rpcData) bool {
return true
}
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go
index 33011726a6f60..344052cb04fd1 100644
--- a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go
+++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go
@@ -237,12 +237,9 @@ func newRPCData(ctx context.Context) (*rpcData, error) {
var authType string
var peerCertificates []*x509.Certificate
- if pi.AuthInfo != nil {
- tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo)
- if ok {
- authType = pi.AuthInfo.AuthType()
- peerCertificates = tlsInfo.State.PeerCertificates
- }
+ if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok {
+ authType = pi.AuthInfo.AuthType()
+ peerCertificates = tlsInfo.State.PeerCertificates
}
return &rpcData{
@@ -281,11 +278,12 @@ func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool)
// In the RBAC world, we need to have a SPIFFE ID as the principal for this
// to be meaningful
principal := ""
- if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" {
+ if rpcData.peerInfo != nil {
// If AuthType = tls, then we can cast AuthInfo to TLSInfo.
- tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo)
- if tlsInfo.SPIFFEID != nil {
- principal = tlsInfo.SPIFFEID.String()
+ if tlsInfo, ok := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo); ok {
+ if tlsInfo.SPIFFEID != nil {
+ principal = tlsInfo.SPIFFEID.String()
+ }
}
}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5e7d311..eb42b19fb99a1 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
// ServerParameters is used to set keepalive and max-age parameters on the
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000000000..c37c58c0233ec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewBufferPool that uses a set of default sizes optimized for
+// expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note that however it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 0000000000000..228e9c2f20f26
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended to not interact with any of the underlying
+// buffers directly; rather, such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other systems. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice, clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000000000..4d66b2ccc2be6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+// String returns a string representation of the buffer. May be used for
+// debugging purposes.
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the receiver to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer functions
+// just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+type SliceBuffer []byte
+
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+func (s SliceBuffer) Ref() {}
+func (s SliceBuffer) Free() {}
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6e268..d2e15253bbfbc 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // Case insenitive comparison: MD is a map, and there's no guarantee
+ // Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
diff --git a/vendor/google.golang.org/grpc/orca/call_metrics.go b/vendor/google.golang.org/grpc/orca/call_metrics.go
index 157dad49c6571..9ae7721420316 100644
--- a/vendor/google.golang.org/grpc/orca/call_metrics.go
+++ b/vendor/google.golang.org/grpc/orca/call_metrics.go
@@ -156,7 +156,7 @@ func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *g
}
func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
// We don't allocate the metric recorder here. It will be allocated the
// first time the user calls CallMetricsRecorderFromContext().
rw := &recorderWrapper{smp: smp}
diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go
index 04edae6de66f1..6e7c4c9f301a9 100644
--- a/vendor/google.golang.org/grpc/orca/producer.go
+++ b/vendor/google.golang.org/grpc/orca/producer.go
@@ -72,7 +72,7 @@ type OOBListenerOptions struct {
// returned stop function must be called when no longer needed. Do not
// register a single OOBListener more than once per SubConn.
func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) {
- pr, close := sc.GetOrBuildProducer(producerBuilderSingleton)
+ pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton)
p := pr.(*producer)
p.registerListener(l, opts.ReportInterval)
@@ -84,7 +84,7 @@ func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOpt
// subsequent calls.
return grpcsync.OnceFunc(func() {
p.unregisterListener(l, opts.ReportInterval)
- close()
+ closeFn()
})
}
diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go
index 67d1fa9d7f2b3..bb664d6a08143 100644
--- a/vendor/google.golang.org/grpc/orca/server_metrics.go
+++ b/vendor/google.golang.org/grpc/orca/server_metrics.go
@@ -108,7 +108,7 @@ type ServerMetricsRecorder interface {
// SetMemoryUtilization sets the memory utilization server metric. Must be
// in the range [0, 1].
SetMemoryUtilization(float64)
- // DeleteMemoryUtilization deletes the memory utiliztion server metric to
+ // DeleteMemoryUtilization deletes the memory utilization server metric to
// prevent it from being sent.
DeleteMemoryUtilization()
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 73bd63364335e..e87a17f36a50b 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
import (
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -31,9 +32,10 @@ import (
// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
- encodedData []byte
+ encodedData mem.BufferSlice
hdr []byte
- payload []byte
+ payload mem.BufferSlice
+ pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
if err != nil {
return err
}
- p.encodedData = data
- compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+ materializedData := data.Materialize()
+ data.Free()
+ p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+ // TODO: it should be possible to grab the bufferPool from the underlying
+ // stream implementation with a type cast to its actual type (such as
+ // addrConnStream) and accessing the buffer pool directly.
+ var compData mem.BufferSlice
+ compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
- p.hdr, p.payload = msgHeader(data, compData)
+
+ if p.pf.isCompressed() {
+ materializedCompData := compData.Materialize()
+ compData.Free()
+ compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+ }
+
+ p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
return nil
}
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
deleted file mode 100644
index 3edca296c224c..0000000000000
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-# Copyright 2020 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eu -o pipefail
-
-WORKDIR=$(mktemp -d)
-
-function finish {
- rm -rf "$WORKDIR"
-}
-trap finish EXIT
-
-export GOBIN=${WORKDIR}/bin
-export PATH=${GOBIN}:${PATH}
-mkdir -p ${GOBIN}
-
-echo "remove existing generated files"
-# grpc_testing_not_regenerate/*.pb.go is not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
-
-echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
-(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
-
-echo "go install cmd/protoc-gen-go-grpc"
-(cd cmd/protoc-gen-go-grpc && go install .)
-
-echo "git clone https://github.com/grpc/grpc-proto"
-git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
-
-echo "git clone https://github.com/protocolbuffers/protobuf"
-git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
-
-# Pull in code.proto as a proto dependency
-mkdir -p ${WORKDIR}/googleapis/google/rpc
-echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
-curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
-
-mkdir -p ${WORKDIR}/out
-
-# Generates sources without the embed requirement
-LEGACY_SOURCES=(
- ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
- ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
- ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
- ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
- profiling/proto/service.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
-)
-
-# Generates only the new gRPC Service symbols
-SOURCES=(
- $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
- ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
- ${WORKDIR}/grpc-proto/grpc/testing/*.proto
- ${WORKDIR}/grpc-proto/grpc/core/*.proto
-)
-
-# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
-# import path of 'bar' in the generated code when 'foo.proto' is imported in
-# one of the sources.
-#
-# Note that the protos listed here are all for testing purposes. All protos to
-# be used externally should have a go_package option (and they don't need to be
-# listed here).
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
-Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
-
-for src in ${SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-for src in ${LEGACY_SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
-# current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-
-# grpc_testing_not_regenerate/*.pb.go are not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
-
-cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
index f2efa2a2cb5a9..09e864a89d35b 100644
--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) {
// Build returns itself for Resolver, because it's both a builder and a resolver.
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
- r.BuildCallback(target, cc, opts)
r.mu.Lock()
defer r.mu.Unlock()
+ // Call BuildCallback after locking to avoid a race when UpdateState
+ // or ReportError is called before Build returns.
+ r.BuildCallback(target, cc, opts)
r.CC = cc
if r.lastSeenState != nil {
err := r.CC.UpdateState(*r.lastSeenState)
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index c5fb45236faf6..23bb3fb258240 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
// any newly created ccResolverWrapper, except that close may be called instead.
func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
return
}
@@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error {
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccr.resolver == nil {
return
}
@@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() {
ccr.closed = true
ccr.mu.Unlock()
- ccr.serializer.Schedule(func(context.Context) {
+ ccr.serializer.TrySchedule(func(context.Context) {
if ccr.resolver == nil {
return
}
@@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P
// addChannelzTraceEvent adds a channelz trace event containing the new
// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ if !logger.V(0) && !channelz.IsOn() {
+ return
+ }
var updates []string
var oldSC, newSC *ServiceConfig
var oldOK, newOK bool
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index fdd49e6e91510..2d96f1405e8da 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,7 +19,6 @@
package grpc
import (
- "bytes"
"compress/gzip"
"context"
"encoding/binary"
@@ -35,6 +34,7 @@ import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -220,8 +220,8 @@ type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.HeaderAddr, _ = attempt.s.Header()
}
@@ -242,8 +242,8 @@ type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.TrailerAddr = attempt.s.Trailer()
}
@@ -264,24 +264,20 @@ type PeerCallOption struct {
PeerAddr *peer.Peer
}
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
if x, ok := peer.FromContext(attempt.s.Context()); ok {
*o.PeerAddr = *x
}
}
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false and the
-// connection is in the TRANSIENT_FAILURE state, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
//
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
@@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
@@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
return nil
}
-func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
@@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
@@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
// ForceCodec returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
@@ -515,10 +511,50 @@ type ForceCodecCallOption struct {
}
func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV1Bridge(o.Codec)
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+ return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+ CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+ c.codec = o.CodecV2
+ return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -540,10 +576,10 @@ type CustomCodecCallOption struct {
}
func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV0Bridge(o.Codec)
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
@@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -581,19 +617,28 @@ const (
compressionMade payloadFormat = 1 // compressed
)
+func (pf payloadFormat) isCompressed() bool {
+ return pf == compressionMade
+}
+
+type streamReader interface {
+ ReadHeader(header []byte) error
+ Read(n int) (mem.BufferSlice, error)
+}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
- r io.Reader
+ r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
- // recvBufferPool is the pool of shared receive buffers.
- recvBufferPool SharedBufferPool
+ // bufferPool is the pool of shared receive buffers.
+ bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -608,14 +653,15 @@ type parser struct {
// - an error from the status package
//
// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+ err := p.r.ReadHeader(p.header[:])
+ if err != nil {
return 0, nil, err
}
- pf = payloadFormat(p.header[0])
+ pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
if length == 0 {
@@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- msg = p.recvBufferPool.Get(int(length))
- if _, err := p.r.Read(msg); err != nil {
+
+ data, err := p.r.Read(int(length))
+ if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
- return pf, msg, nil
+ return pf, data, nil
}
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
-func encode(c baseCodec, msg any) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(len(b)) > math.MaxUint32 {
+ if uint(b.Len()) > math.MaxUint32 {
+ b.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
return b, nil
@@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- if len(in) == 0 {
- return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+ if (compressor == nil && cp == nil) || in.Len() == 0 {
+ return nil, compressionNone, nil
}
+ var out mem.BufferSlice
+ w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
+ out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
- cbuf := &bytes.Buffer{}
if compressor != nil {
- z, err := compressor.Compress(cbuf)
+ z, err := compressor.Compress(w)
if err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ for _, b := range in {
+ if _, err := z.Write(b.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
+ }
}
if err := z.Close(); err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
} else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ // This is obviously really inefficient since it fully materializes the data, but
+ // there is no way around this with the old Compressor API. At least it attempts
+ // to return the buffer to the provider, in the hopes it can be reused (maybe
+ // even by a subsequent call to this very function).
+ buf := in.MaterializeToBuffer(pool)
+ defer buf.Free()
+ if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
}
}
- return cbuf.Bytes(), nil
+ return out, compressionMade, nil
}
const (
@@ -697,33 +752,36 @@ const (
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ hdr[0] = byte(pf)
+
+ var length uint32
+ if pf.isCompressed() {
+ length = uint32(compData.Len())
+ payload = compData
} else {
- hdr[0] = byte(compressionNone)
+ length = uint32(data.Len())
+ payload = data
}
// Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
+ binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+ return hdr, payload
}
-func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- CompressedLength: len(payload),
+ Length: dataLength,
+ WireLength: payloadLength + headerLen,
+ CompressedLength: payloadLength,
SentTime: t,
}
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if isServer {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ } else {
+ return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
type payloadInfo struct {
compressedLength int // The compressed length got from wire.
- uncompressedBytes []byte
+ uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+ if p != nil && p.uncompressedBytes != nil {
+ p.uncompressedBytes.Free()
+ }
}
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as
// the buffer is no longer needed.
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
-) (uncompressedBuf []byte, cancel func(), err error) {
- pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+ pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return nil, nil, st.Err()
+ compressedLength := compressed.Len()
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+ compressed.Free()
+ return nil, st.Err()
}
var size int
- if pf == compressionMade {
+ if pf.isCompressed() {
+ defer compressed.Free()
+
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
- uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
+ var uncompressedBuf []byte
+ uncompressedBuf, err = dc.Do(compressed.Reader())
+ if err == nil {
+ out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
+ }
size = len(uncompressedBuf)
} else {
- uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
+ out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
}
if err != nil {
- return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
+ out.Free()
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
} else {
- uncompressedBuf = compressedBuf
+ out = compressed
}
if payInfo != nil {
- payInfo.compressedLength = len(compressedBuf)
- payInfo.uncompressedBytes = uncompressedBuf
-
- cancel = func() {}
- } else {
- cancel = func() {
- p.recvBufferPool.Put(&compressedBuf)
- }
+ payInfo.compressedLength = compressedLength
+ out.Ref()
+ payInfo.uncompressedBytes = out
}
- return uncompressedBuf, cancel, nil
+ return out, nil
}
// Using compressor, decompress d, returning data and size.
// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
+ dcReader, err := compressor.Decompress(d.Reader())
if err != nil {
return nil, 0, err
}
- if sizer, ok := compressor.(interface {
- DecompressedSize(compressedBytes []byte) int
- }); ok {
- if size := sizer.DecompressedSize(d); size >= 0 {
- if size > maxReceiveMessageSize {
- return nil, size, nil
- }
- // size is used as an estimate to size the buffer, but we
- // will read more data if available.
- // +MinRead so ReadFrom will not reallocate if size is correct.
- //
- // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
- // we can also utilize the recv buffer pool here.
- buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
- bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return buf.Bytes(), int(bytesRead), err
- }
+
+ // TODO: Can/should this still be preserved with the new BufferSlice API? Are
+ // there any actual benefits to allocating a single large buffer instead of
+ // multiple smaller ones?
+ //if sizer, ok := compressor.(interface {
+ // DecompressedSize(compressedBytes []byte) int
+ //}); ok {
+ // if size := sizer.DecompressedSize(d); size >= 0 {
+ // if size > maxReceiveMessageSize {
+ // return nil, size, nil
+ // }
+ // // size is used as an estimate to size the buffer, but we
+ // // will read more data if available.
+ // // +MinRead so ReadFrom will not reallocate if size is correct.
+ // //
+ // // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
+ // // we can also utilize the recv buffer pool here.
+ // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+ // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ // return buf.Bytes(), int(bytesRead), err
+ // }
+ //}
+
+ var out mem.BufferSlice
+ _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ if err != nil {
+ out.Free()
+ return nil, 0, err
}
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return d, len(d), err
+ return out, out.Len(), nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+ data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
- defer cancel()
- if err := c.Unmarshal(buf, m); err != nil {
+ // If the codec wants its own reference to the data, it can get it. Otherwise, always
+ // free the buffers.
+ defer data.Free()
+
+ if err := c.Unmarshal(data, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
+
return nil
}
@@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error {
// encoding.Codec (Name vs. String method name). We only support
// setting content subtype from encoding.Codec to avoid a behavior
// change with the deprecated version.
- if ec, ok := c.codec.(encoding.Codec); ok {
+ if ec, ok := c.codec.(encoding.CodecV2); ok {
c.contentSubtype = strings.ToLower(ec.Name())
}
}
@@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error {
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
+ c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
+ c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 89f8e4792bf15..d1e1415a40f9b 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -80,7 +81,7 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
- internal.RecvBufferPool = recvBufferPool
+ internal.BufferPool = bufferPool
}
var statusOK = status.New(codes.OK, "")
@@ -170,7 +171,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
- recvBufferPool SharedBufferPool
+ bufferPool mem.BufferPool
waitForHandlers bool
}
@@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
- recvBufferPool: nopBufferPool{},
+ bufferPool: mem.DefaultBufferPool(),
}
var globalServerOptions []ServerOption
@@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV0Bridge(codec)
})
}
@@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption {
// later release.
func ForceServerCodec(codec encoding.Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV1Bridge(codec)
+ })
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codecV2
})
}
@@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
})
}
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
- return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.recvBufferPool = bufferPool
+ o.bufferPool = bufferPool
})
}
@@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
+// serverWorker blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
@@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
+ BufferPool: s.opts.bufferPool,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
- compData, err := compress(data, cp, comp)
+
+ compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
+ data.Free()
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
+
+ hdr, payload := msgHeader(data, compData, pf)
+
+ defer func() {
+ compData.Free()
+ data.Free()
+ // payload does not need to be freed here, it is either data or compData, both of
+ // which are already freed.
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if payloadLen > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = t.Write(stream, hdr, payload, opts)
if err == nil {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+ if len(s.opts.statsHandlers) != 0 {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+ }
}
}
return err
@@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+ d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
+ defer d.Free()
if channelz.IsOn() {
t.IncrMsgRecv()
}
df := func(v any) error {
- defer cancel()
-
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
+
for _, sh := range shs {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
- Length: len(d),
+ Length: d.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Data: d,
})
}
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: d,
+ Message: d.Materialize(),
}
for _, binlog := range binlogs {
binlog.Log(ctx, cm)
@@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
ctx: ctx,
t: t,
s: stream,
- p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+ p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return s.opts.codec
}
if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
- codec := encoding.GetCodec(contentSubtype)
+ codec := getCodec(contentSubtype)
if codec == nil {
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
return codec
}
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8e256..0000000000000
--- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
- // Get returns a buffer with specified length from the pool.
- //
- // The returned byte slice may be not zero initialized.
- Get(length int) []byte
-
- // Put returns a buffer to the pool.
- Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
- return &simpleSharedBufferPool{
- pools: [poolArraySize]simpleSharedBufferChildPool{
- newBytesPool(level0PoolMaxSize),
- newBytesPool(level1PoolMaxSize),
- newBytesPool(level2PoolMaxSize),
- newBytesPool(level3PoolMaxSize),
- newBytesPool(level4PoolMaxSize),
- newBytesPool(0),
- },
- }
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
- pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
- return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
- p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
- switch {
- case size <= level0PoolMaxSize:
- return level0PoolIdx
- case size <= level1PoolMaxSize:
- return level1PoolIdx
- case size <= level2PoolMaxSize:
- return level2PoolIdx
- case size <= level3PoolMaxSize:
- return level3PoolIdx
- case size <= level4PoolMaxSize:
- return level4PoolIdx
- default:
- return levelMaxPoolIdx
- }
-}
-
-const (
- level0PoolMaxSize = 16 // 16 B
- level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
- level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
- level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
- level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
- level0PoolIdx = iota
- level1PoolIdx
- level2PoolIdx
- level3PoolIdx
- level4PoolIdx
- levelMaxPoolIdx
- poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
- Get(size int) []byte
- Put(any)
-}
-
-type bufferPool struct {
- sync.Pool
-
- defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
- bs := p.Pool.Get().(*[]byte)
-
- if cap(*bs) < size {
- p.Pool.Put(bs)
-
- return make([]byte, size)
- }
-
- return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
- return &bufferPool{
- Pool: sync.Pool{
- New: func() any {
- bs := make([]byte, size)
- return &bs
- },
- },
- defaultSize: size,
- }
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
- return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go
new file mode 100644
index 0000000000000..4af7f933c8bae
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc"
+ estats "google.golang.org/grpc/experimental/stats"
+ istats "google.golang.org/grpc/internal/stats"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+)
+
+type clientStatsHandler struct {
+ estats.MetricsRecorder
+ options Options
+ clientMetrics clientMetrics
+}
+
+func (h *clientStatsHandler) initializeMetrics() {
+ // Will set no metrics to record, logically making this stats handler a
+ // no-op.
+ if h.options.MetricsOptions.MeterProvider == nil {
+ return
+ }
+
+ meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version))
+ if meter == nil {
+ return
+ }
+
+ metrics := h.options.MetricsOptions.Metrics
+ if metrics == nil {
+ metrics = DefaultMetrics()
+ }
+
+ h.clientMetrics.attemptStarted = createInt64Counter(metrics.Metrics(), "grpc.client.attempt.started", meter, otelmetric.WithUnit("attempt"), otelmetric.WithDescription("Number of client call attempts started."))
+ h.clientMetrics.attemptDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.attempt.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+ h.clientMetrics.attemptSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.clientMetrics.attemptRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.clientMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("Time taken by gRPC to complete an RPC from application's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+
+ rm := ®istryMetrics{
+ optionalLabels: h.options.MetricsOptions.OptionalLabels,
+ }
+ h.MetricsRecorder = rm
+ rm.registerMetrics(metrics, meter)
+}
+
+func (h *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ ci := &callInfo{
+ target: cc.CanonicalTarget(),
+ method: h.determineMethod(method, opts...),
+ }
+ ctx = setCallInfo(ctx, ci)
+
+ if h.options.MetricsOptions.pluginOption != nil {
+ md := h.options.MetricsOptions.pluginOption.GetMetadata()
+ for k, vs := range md {
+ for _, v := range vs {
+ ctx = metadata.AppendToOutgoingContext(ctx, k, v)
+ }
+ }
+ }
+
+ startTime := time.Now()
+ err := invoker(ctx, method, req, reply, cc, opts...)
+ h.perCallMetrics(ctx, err, startTime, ci)
+ return err
+}
+
+// determineMethod determines the method to record attributes with. This will be
+// "other" if StaticMethod isn't specified or if method filter is set and
+// specifies, the method name as is otherwise.
+func (h *clientStatsHandler) determineMethod(method string, opts ...grpc.CallOption) string {
+ for _, opt := range opts {
+ if _, ok := opt.(grpc.StaticMethodCallOption); ok {
+ return removeLeadingSlash(method)
+ }
+ }
+ return "other"
+}
+
+func (h *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ ci := &callInfo{
+ target: cc.CanonicalTarget(),
+ method: h.determineMethod(method, opts...),
+ }
+ ctx = setCallInfo(ctx, ci)
+
+ if h.options.MetricsOptions.pluginOption != nil {
+ md := h.options.MetricsOptions.pluginOption.GetMetadata()
+ for k, vs := range md {
+ for _, v := range vs {
+ ctx = metadata.AppendToOutgoingContext(ctx, k, v)
+ }
+ }
+ }
+
+ startTime := time.Now()
+
+ callback := func(err error) {
+ h.perCallMetrics(ctx, err, startTime, ci)
+ }
+ opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...)
+ return streamer(ctx, desc, cc, method, opts...)
+}
+
+func (h *clientStatsHandler) perCallMetrics(ctx context.Context, err error, startTime time.Time, ci *callInfo) {
+ callLatency := float64(time.Since(startTime)) / float64(time.Second) // calculate ASAP
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ otelattribute.String("grpc.status", canonicalString(status.Code(err))),
+ ))
+ h.clientMetrics.callDuration.Record(ctx, callLatency, attrs)
+}
+
+// TagConn exists to satisfy stats.Handler.
+func (h *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn exists to satisfy stats.Handler.
+func (h *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {}
+
+// TagRPC implements per RPC attempt context management.
+func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ // Numerous stats handlers can be used for the same channel. The cluster
+ // impl balancer which writes to this will only write once, thus have this
+ // stats handler's per attempt scoped context point to the same optional
+ // labels map if set.
+ var labels *istats.Labels
+ if labels = istats.GetLabels(ctx); labels == nil {
+ labels = &istats.Labels{
+ // The defaults for all the per call labels from a plugin that
+ // executes on the callpath that this OpenTelemetry component
+ // currently supports.
+ TelemetryLabels: map[string]string{
+ "grpc.lb.locality": "",
+ },
+ }
+ ctx = istats.SetLabels(ctx, labels)
+ }
+ ai := &attemptInfo{ // populates information about RPC start.
+ startTime: time.Now(),
+ xdsLabels: labels.TelemetryLabels,
+ method: info.FullMethodName,
+ }
+ ri := &rpcInfo{
+ ai: ai,
+ }
+ return setRPCInfo(ctx, ri)
+}
+
+func (h *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ ri := getRPCInfo(ctx)
+ if ri == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no client attempt data present")
+ return
+ }
+ h.processRPCEvent(ctx, rs, ri.ai)
+}
+
+func (h *clientStatsHandler) processRPCEvent(ctx context.Context, s stats.RPCStats, ai *attemptInfo) {
+ switch st := s.(type) {
+ case *stats.Begin:
+ ci := getCallInfo(ctx)
+ if ci == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present")
+ return
+ }
+
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ ))
+ h.clientMetrics.attemptStarted.Add(ctx, 1, attrs)
+ case *stats.OutPayload:
+ atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength))
+ case *stats.InPayload:
+ atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength))
+ case *stats.InHeader:
+ h.setLabelsFromPluginOption(ai, st.Header)
+ case *stats.InTrailer:
+ h.setLabelsFromPluginOption(ai, st.Trailer)
+ case *stats.End:
+ h.processRPCEnd(ctx, ai, st)
+ default:
+ }
+}
+
+func (h *clientStatsHandler) setLabelsFromPluginOption(ai *attemptInfo, incomingMetadata metadata.MD) {
+ if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil {
+ labels := h.options.MetricsOptions.pluginOption.GetLabels(incomingMetadata)
+ if labels == nil {
+ labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt.
+ }
+ ai.pluginOptionLabels = labels
+ }
+}
+
+func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) {
+ ci := getCallInfo(ctx)
+ if ci == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present")
+ return
+ }
+ latency := float64(time.Since(ai.startTime)) / float64(time.Second)
+ st := "OK"
+ if e.Error != nil {
+ s, _ := status.FromError(e.Error)
+ st = canonicalString(s.Code())
+ }
+
+ attributes := []otelattribute.KeyValue{
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ otelattribute.String("grpc.status", st),
+ }
+
+ for k, v := range ai.pluginOptionLabels {
+ attributes = append(attributes, otelattribute.String(k, v))
+ }
+
+ for _, o := range h.options.MetricsOptions.OptionalLabels {
+ // TODO: Add a filter for converting to unknown if not present in the
+ // CSM Plugin Option layer by adding an optional labels API.
+ if val, ok := ai.xdsLabels[o]; ok {
+ attributes = append(attributes, otelattribute.String(o, val))
+ }
+ }
+
+ // Allocate vararg slice once.
+ opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))}
+ h.clientMetrics.attemptDuration.Record(ctx, latency, opts...)
+ h.clientMetrics.attemptSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...)
+ h.clientMetrics.attemptRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...)
+}
+
+const (
+ // ClientAttemptStarted is the number of client call attempts started.
+ ClientAttemptStarted estats.Metric = "grpc.client.attempt.started"
+ // ClientAttemptDuration is the end-to-end time taken to complete a client
+ // call attempt.
+ ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration"
+ // ClientAttemptSentCompressedTotalMessageSize is the compressed message
+ // bytes sent per client call attempt.
+ ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size"
+ // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message
+ // bytes received per call attempt.
+ ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size"
+ // ClientCallDuration is the time taken by gRPC to complete an RPC from
+ // application's perspective.
+ ClientCallDuration estats.Metric = "grpc.client.call.duration"
+)
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go
new file mode 100644
index 0000000000000..b595aa85ffbec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal defines the PluginOption interface.
+package internal
+
+import (
+ "google.golang.org/grpc/metadata"
+)
+
+// SetPluginOption sets the plugin option on Options.
+var SetPluginOption any // func(*Options, PluginOption)
+
+// PluginOption is the interface which represents a plugin option for the
+// OpenTelemetry instrumentation component. This plugin option emits labels from
+// metadata and also creates metadata containing labels. These labels are
+// intended to be added to applicable OpenTelemetry metrics recorded in the
+// OpenTelemetry instrumentation component.
+//
+// In the future, we hope to stabilize and expose this API to allow plugins to
+// inject labels of their choosing into metrics recorded.
+type PluginOption interface {
+ // GetMetadata creates a MD with metadata exchange labels.
+ GetMetadata() metadata.MD
+ // GetLabels emits labels to be attached to metrics for the RPC that
+ // contains the provided incomingMetadata.
+ GetLabels(incomingMetadata metadata.MD) map[string]string
+}
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go
new file mode 100644
index 0000000000000..cc5ad387fb4c4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package opentelemetry implements opentelemetry instrumentation code for
+// gRPC-Go clients and servers.
+package opentelemetry
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ otelinternal "google.golang.org/grpc/stats/opentelemetry/internal"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+func init() {
+ otelinternal.SetPluginOption = func(o *Options, po otelinternal.PluginOption) {
+ o.MetricsOptions.pluginOption = po
+ }
+}
+
+var logger = grpclog.Component("otel-plugin")
+
+var canonicalString = internal.CanonicalString.(func(codes.Code) string)
+
+var joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption)
+
+// Options are the options for OpenTelemetry instrumentation.
+type Options struct {
+ // MetricsOptions are the metrics options for OpenTelemetry instrumentation.
+ MetricsOptions MetricsOptions
+}
+
+// MetricsOptions are the metrics options for OpenTelemetry instrumentation.
+type MetricsOptions struct {
+ // MeterProvider is the MeterProvider instance that will be used to create
+ // instruments. To enable metrics collection, set a meter provider. If
+ // unset, no metrics will be recorded. Any implementation knobs (i.e. views,
+ // bounds) set in the MeterProvider take precedence over the API calls from
+ // this interface. (i.e. it will create default views for unset views).
+ MeterProvider otelmetric.MeterProvider
+
+ // Metrics are the metrics to instrument. Will create instrument and record telemetry
+ // for corresponding metric supported by the client and server
+ // instrumentation components if applicable. If not set, the default metrics
+ // will be recorded.
+ Metrics *estats.Metrics
+
+ // MethodAttributeFilter is to record the method name of RPCs handled by
+ // grpc.UnknownServiceHandler, but take care to limit the values allowed, as
+ // allowing too many will increase cardinality and could cause severe memory
+ // or performance problems. On Client Side, pass a
+ // grpc.StaticMethodCallOption as a call option into Invoke or NewStream.
+ // This only applies for server side metrics.
+ MethodAttributeFilter func(string) bool
+
+ // OptionalLabels are labels received from LB Policies that this component
+ // should add to metrics that record after receiving incoming metadata.
+ OptionalLabels []string
+
+ // pluginOption is used to get labels to attach to certain metrics, if set.
+ pluginOption otelinternal.PluginOption
+}
+
+// DialOption returns a dial option which enables OpenTelemetry instrumentation
+// code for a grpc.ClientConn.
+//
+// Client applications interested in instrumenting their grpc.ClientConn should
+// pass the dial option returned from this function as a dial option to
+// grpc.NewClient().
+//
+// For the metrics supported by this instrumentation code, specify the client
+// metrics to record in metrics options. Also provide an implementation of a
+// MeterProvider. If the passed in Meter Provider does not have the view
+// configured for an individual metric turned on, the API call in this component
+// will create a default view for that metric.
+func DialOption(o Options) grpc.DialOption {
+ csh := &clientStatsHandler{options: o}
+ csh.initializeMetrics()
+ return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh))
+}
+
+var joinServerOptions = internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption)
+
+// ServerOption returns a server option which enables OpenTelemetry
+// instrumentation code for a grpc.Server.
+//
+// Server applications interested in instrumenting their grpc.Server should pass
+// the server option returned from this function as an argument to
+// grpc.NewServer().
+//
+// For the metrics supported by this instrumentation code, specify the server
+// metrics to record in metrics options. Also provide an implementation of a
+// MeterProvider. If the passed in Meter Provider does not have the view
+// configured for an individual metric turned on, the API call in this component
+// will create a default view for that metric.
+func ServerOption(o Options) grpc.ServerOption {
+ ssh := &serverStatsHandler{options: o}
+ ssh.initializeMetrics()
+ return joinServerOptions(grpc.ChainUnaryInterceptor(ssh.unaryInterceptor), grpc.ChainStreamInterceptor(ssh.streamInterceptor), grpc.StatsHandler(ssh))
+}
+
+// callInfo is information pertaining to the lifespan of the RPC client side.
+type callInfo struct {
+ target string
+
+ method string
+}
+
+type callInfoKey struct{}
+
+func setCallInfo(ctx context.Context, ci *callInfo) context.Context {
+ return context.WithValue(ctx, callInfoKey{}, ci)
+}
+
+// getCallInfo returns the callInfo stored in the context, or nil
+// if there isn't one.
+func getCallInfo(ctx context.Context) *callInfo {
+ ci, _ := ctx.Value(callInfoKey{}).(*callInfo)
+ return ci
+}
+
+// rpcInfo is RPC information scoped to the RPC attempt life span client side,
+// and the RPC life span server side.
+type rpcInfo struct {
+ ai *attemptInfo
+}
+
+type rpcInfoKey struct{}
+
+func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context {
+ return context.WithValue(ctx, rpcInfoKey{}, ri)
+}
+
+// getRPCInfo returns the rpcInfo stored in the context, or nil
+// if there isn't one.
+func getRPCInfo(ctx context.Context) *rpcInfo {
+ ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo)
+ return ri
+}
+
+func removeLeadingSlash(mn string) string {
+ return strings.TrimLeft(mn, "/")
+}
+
+// attemptInfo is RPC information scoped to the RPC attempt life span client
+// side, and the RPC life span server side.
+type attemptInfo struct {
+ // access these counts atomically for hedging in the future:
+ // number of bytes after compression (within each message) from side (client
+ // || server).
+ sentCompressedBytes int64
+ // number of compressed bytes received (within each message) received on
+ // side (client || server).
+ recvCompressedBytes int64
+
+ startTime time.Time
+ method string
+
+ pluginOptionLabels map[string]string // pluginOptionLabels to attach to metrics emitted
+ xdsLabels map[string]string
+}
+
+type clientMetrics struct {
+ // "grpc.client.attempt.started"
+ attemptStarted otelmetric.Int64Counter
+ // "grpc.client.attempt.duration"
+ attemptDuration otelmetric.Float64Histogram
+ // "grpc.client.attempt.sent_total_compressed_message_size"
+ attemptSentTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.client.attempt.rcvd_total_compressed_message_size"
+ attemptRcvdTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.client.call.duration"
+ callDuration otelmetric.Float64Histogram
+}
+
+type serverMetrics struct {
+ // "grpc.server.call.started"
+ callStarted otelmetric.Int64Counter
+ // "grpc.server.call.sent_total_compressed_message_size"
+ callSentTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.server.call.rcvd_total_compressed_message_size"
+ callRcvdTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.server.call.duration"
+ callDuration otelmetric.Float64Histogram
+}
+
+func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Counter{}
+ }
+ ret, err := meter.Int64Counter(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Counter{}
+ }
+ return ret
+}
+
+func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Float64Counter{}
+ }
+ ret, err := meter.Float64Counter(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Float64Counter{}
+ }
+ return ret
+}
+
+func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Histogram{}
+ }
+ ret, err := meter.Int64Histogram(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Histogram{}
+ }
+ return ret
+}
+
+func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Float64Histogram{}
+ }
+ ret, err := meter.Float64Histogram(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Float64Histogram{}
+ }
+ return ret
+}
+
+func createInt64Gauge(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Gauge{}
+ }
+ ret, err := meter.Int64Gauge(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Gauge{}
+ }
+ return ret
+}
+
+func optionFromLabels(labelKeys []string, optionalLabelKeys []string, optionalLabels []string, labelVals ...string) otelmetric.MeasurementOption {
+ var attributes []otelattribute.KeyValue
+
+ // Once it hits here lower level has guaranteed length of labelVals matches
+ // labelKeys + optionalLabelKeys.
+ for i, label := range labelKeys {
+ attributes = append(attributes, otelattribute.String(label, labelVals[i]))
+ }
+
+ for i, label := range optionalLabelKeys {
+ for _, optLabel := range optionalLabels { // o(n) could build out a set but n is currently capped at < 5
+ if label == optLabel {
+ attributes = append(attributes, otelattribute.String(label, labelVals[i+len(labelKeys)]))
+ }
+ }
+ }
+ return otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))
+}
+
+// registryMetrics implements MetricsRecorder for the client and server stats
+// handlers.
+type registryMetrics struct {
+ intCounts map[*estats.MetricDescriptor]otelmetric.Int64Counter
+ floatCounts map[*estats.MetricDescriptor]otelmetric.Float64Counter
+ intHistos map[*estats.MetricDescriptor]otelmetric.Int64Histogram
+ floatHistos map[*estats.MetricDescriptor]otelmetric.Float64Histogram
+ intGauges map[*estats.MetricDescriptor]otelmetric.Int64Gauge
+
+ optionalLabels []string
+}
+
+func (rm *registryMetrics) registerMetrics(metrics *estats.Metrics, meter otelmetric.Meter) {
+ rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter)
+ rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter)
+ rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram)
+ rm.floatHistos = make(map[*estats.MetricDescriptor]otelmetric.Float64Histogram)
+ rm.intGauges = make(map[*estats.MetricDescriptor]otelmetric.Int64Gauge)
+
+ for metric := range metrics.Metrics() {
+ desc := estats.DescriptorForMetric(metric)
+ if desc == nil {
+ // Either the metric was per call or the metric is not registered.
+ // Thus, if this component ever receives the desc as a handle in
+ // record it will be a no-op.
+ continue
+ }
+ switch desc.Type {
+ case estats.MetricTypeIntCount:
+ rm.intCounts[desc] = createInt64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ case estats.MetricTypeFloatCount:
+ rm.floatCounts[desc] = createFloat64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ case estats.MetricTypeIntHisto:
+ rm.intHistos[desc] = createInt64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...))
+ case estats.MetricTypeFloatHisto:
+ rm.floatHistos[desc] = createFloat64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...))
+ case estats.MetricTypeIntGauge:
+ rm.intGauges[desc] = createInt64Gauge(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ }
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ic, ok := rm.intCounts[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ic.Add(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ desc := handle.Descriptor()
+ if fc, ok := rm.floatCounts[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ fc.Add(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ih, ok := rm.intHistos[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ih.Record(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ desc := handle.Descriptor()
+ if fh, ok := rm.floatHistos[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ fh.Record(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ig, ok := rm.intGauges[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ig.Record(context.TODO(), incr, ao)
+ }
+}
+
+// Users of this component should use these bucket boundaries as part of their
+// SDK MeterProvider passed in. This component sends this as "advice" to the
+// API, which works, however this stability is not guaranteed, so for safety the
+// SDK Meter Provider provided should set these bounds for corresponding
+// metrics.
+var (
+ // DefaultLatencyBounds are the default bounds for latency metrics.
+ DefaultLatencyBounds = []float64{0, 0.00001, 0.00005, 0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.008, 0.01, 0.013, 0.016, 0.02, 0.025, 0.03, 0.04, 0.05, 0.065, 0.08, 0.1, 0.13, 0.16, 0.2, 0.25, 0.3, 0.4, 0.5, 0.65, 0.8, 1, 2, 5, 10, 20, 50, 100} // provide "advice" through API, SDK should set this too
+ // DefaultSizeBounds are the default bounds for metrics which record size.
+ DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}
+ // defaultPerCallMetrics are the default metrics provided by this module.
+ defaultPerCallMetrics = estats.NewMetrics(ClientAttemptStarted, ClientAttemptDuration, ClientAttemptSentCompressedTotalMessageSize, ClientAttemptRcvdCompressedTotalMessageSize, ClientCallDuration, ServerCallStarted, ServerCallSentCompressedTotalMessageSize, ServerCallRcvdCompressedTotalMessageSize, ServerCallDuration)
+)
+
+// DefaultMetrics returns a set of default OpenTelemetry metrics.
+//
+// This should only be invoked after init time.
+func DefaultMetrics() *estats.Metrics {
+ return defaultPerCallMetrics.Join(estats.DefaultMetrics)
+}
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
new file mode 100644
index 0000000000000..eaea559b2c103
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+)
+
+type serverStatsHandler struct {
+ estats.MetricsRecorder
+ options Options
+ serverMetrics serverMetrics
+}
+
+func (h *serverStatsHandler) initializeMetrics() {
+ // Will set no metrics to record, logically making this stats handler a
+ // no-op.
+ if h.options.MetricsOptions.MeterProvider == nil {
+ return
+ }
+
+ meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version))
+ if meter == nil {
+ return
+ }
+ metrics := h.options.MetricsOptions.Metrics
+ if metrics == nil {
+ metrics = DefaultMetrics()
+ }
+
+ h.serverMetrics.callStarted = createInt64Counter(metrics.Metrics(), "grpc.server.call.started", meter, otelmetric.WithUnit("call"), otelmetric.WithDescription("Number of server calls started."))
+ h.serverMetrics.callSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.serverMetrics.callRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.serverMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.server.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a call from server transport's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+
+ rm := ®istryMetrics{
+ optionalLabels: h.options.MetricsOptions.OptionalLabels,
+ }
+ h.MetricsRecorder = rm
+ rm.registerMetrics(metrics, meter)
+}
+
+// attachLabelsTransportStream intercepts SetHeader and SendHeader calls of the
+// underlying ServerTransportStream to attach metadataExchangeLabels.
+type attachLabelsTransportStream struct {
+ grpc.ServerTransportStream
+
+ attachedLabels atomic.Bool
+ metadataExchangeLabels metadata.MD
+}
+
+func (s *attachLabelsTransportStream) SetHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerTransportStream.SetHeader(s.metadataExchangeLabels)
+ }
+ return s.ServerTransportStream.SetHeader(md)
+}
+
+func (s *attachLabelsTransportStream) SendHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerTransportStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerTransportStream.SendHeader(md)
+}
+
+func (h *serverStatsHandler) unaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ var metadataExchangeLabels metadata.MD
+ if h.options.MetricsOptions.pluginOption != nil {
+ metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata()
+ }
+
+ sts := grpc.ServerTransportStreamFromContext(ctx)
+
+ alts := &attachLabelsTransportStream{
+ ServerTransportStream: sts,
+ metadataExchangeLabels: metadataExchangeLabels,
+ }
+ ctx = grpc.NewContextWithServerTransportStream(ctx, alts)
+
+ res, err := handler(ctx, req)
+ if err != nil { // maybe trailers-only if headers haven't already been sent
+ if !alts.attachedLabels.Swap(true) {
+ alts.SetTrailer(alts.metadataExchangeLabels)
+ }
+ } else { // headers will be written; a message was sent
+ if !alts.attachedLabels.Swap(true) {
+ alts.SetHeader(alts.metadataExchangeLabels)
+ }
+ }
+
+ return res, err
+}
+
+// attachLabelsStream embeds a grpc.ServerStream, and intercepts the
+// SetHeader/SendHeader/SendMsg/SendTrailer call to attach metadata exchange
+// labels.
+type attachLabelsStream struct {
+ grpc.ServerStream
+
+ attachedLabels atomic.Bool
+ metadataExchangeLabels metadata.MD
+}
+
+func (s *attachLabelsStream) SetHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerStream.SetHeader(md)
+}
+
+func (s *attachLabelsStream) SendHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerStream.SendHeader(md)
+}
+
+func (s *attachLabelsStream) SendMsg(m any) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+ return s.ServerStream.SendMsg(m)
+}
+
+func (h *serverStatsHandler) streamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ var metadataExchangeLabels metadata.MD
+ if h.options.MetricsOptions.pluginOption != nil {
+ metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata()
+ }
+ als := &attachLabelsStream{
+ ServerStream: ss,
+ metadataExchangeLabels: metadataExchangeLabels,
+ }
+ err := handler(srv, als)
+
+ // Add metadata exchange labels to trailers if never sent in headers,
+ // irrespective of whether or not RPC failed.
+ if !als.attachedLabels.Load() {
+ als.SetTrailer(als.metadataExchangeLabels)
+ }
+ return err
+}
+
+// TagConn exists to satisfy stats.Handler.
+func (h *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn exists to satisfy stats.Handler.
+func (h *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {}
+
+// TagRPC implements per RPC context management.
+func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ method := info.FullMethodName
+ if h.options.MetricsOptions.MethodAttributeFilter != nil {
+ if !h.options.MetricsOptions.MethodAttributeFilter(method) {
+ method = "other"
+ }
+ }
+ server := internal.ServerFromContext.(func(context.Context) *grpc.Server)(ctx)
+ if server == nil { // Shouldn't happen, defensive programming.
+ logger.Error("ctx passed into server side stats handler has no grpc server ref")
+ method = "other"
+ } else {
+ isRegisteredMethod := internal.IsRegisteredMethod.(func(*grpc.Server, string) bool)
+ if !isRegisteredMethod(server, method) {
+ method = "other"
+ }
+ }
+
+ ai := &attemptInfo{
+ startTime: time.Now(),
+ method: removeLeadingSlash(method),
+ }
+ ri := &rpcInfo{
+ ai: ai,
+ }
+ return setRPCInfo(ctx, ri)
+}
+
+// HandleRPC implements per RPC tracing and stats implementation.
+func (h *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ ri := getRPCInfo(ctx)
+ if ri == nil {
+ logger.Error("ctx passed into server side stats handler metrics event handling has no server call data present")
+ return
+ }
+ h.processRPCData(ctx, rs, ri.ai)
+}
+
+func (h *serverStatsHandler) processRPCData(ctx context.Context, s stats.RPCStats, ai *attemptInfo) {
+ switch st := s.(type) {
+ case *stats.InHeader:
+ if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil {
+ labels := h.options.MetricsOptions.pluginOption.GetLabels(st.Header)
+ if labels == nil {
+ labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt.
+ }
+ ai.pluginOptionLabels = labels
+ }
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ai.method),
+ ))
+ h.serverMetrics.callStarted.Add(ctx, 1, attrs)
+ case *stats.OutPayload:
+ atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength))
+ case *stats.InPayload:
+ atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength))
+ case *stats.End:
+ h.processRPCEnd(ctx, ai, st)
+ default:
+ }
+}
+
+func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) {
+ latency := float64(time.Since(ai.startTime)) / float64(time.Second)
+ st := "OK"
+ if e.Error != nil {
+ s, _ := status.FromError(e.Error)
+ st = canonicalString(s.Code())
+ }
+ attributes := []otelattribute.KeyValue{
+ otelattribute.String("grpc.method", ai.method),
+ otelattribute.String("grpc.status", st),
+ }
+ for k, v := range ai.pluginOptionLabels {
+ attributes = append(attributes, otelattribute.String(k, v))
+ }
+
+ // Allocate vararg slice once.
+ opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))}
+ h.serverMetrics.callDuration.Record(ctx, latency, opts...)
+ h.serverMetrics.callSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...)
+ h.serverMetrics.callRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...)
+}
+
+const (
+ // ServerCallStarted is the number of server calls started.
+ ServerCallStarted estats.Metric = "grpc.server.call.started"
+ // ServerCallSentCompressedTotalMessageSize is the compressed message bytes
+ // sent per server call.
+ ServerCallSentCompressedTotalMessageSize estats.Metric = "grpc.server.call.sent_total_compressed_message_size"
+ // ServerCallRcvdCompressedTotalMessageSize is the compressed message bytes
+ // received per server call.
+ ServerCallRcvdCompressedTotalMessageSize estats.Metric = "grpc.server.call.rcvd_total_compressed_message_size"
+ // ServerCallDuration is the end-to-end time taken to complete a call from
+ // server transport's perspective.
+ ServerCallDuration estats.Metric = "grpc.server.call.duration"
+)
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd65182c5..71195c4943d7a 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 8051ef5b514a3..bb2b2a216ce24 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -41,6 +41,7 @@ import (
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs.attempt = a
return nil
}
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
@@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
}
a.s = s
a.ctx = s.Context()
- a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+ a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -566,10 +567,15 @@ type clientStream struct {
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- onCommit func()
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
+ committed bool // active attempt committed for retry?
+ onCommit func()
+ replayBuffer []replayOp // operations to replay on retry
+ replayBufferSize int // current size of replayBuffer
+}
+
+type replayOp struct {
+ op func(a *csAttempt) error
+ cleanup func()
}
// csAttempt implements a single transport stream attempt within a
@@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
cs.onCommit()
}
cs.committed = true
- cs.buffer = nil
+ for _, op := range cs.replayBuffer {
+ if op.cleanup != nil {
+ op.cleanup()
+ }
+ }
+ cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
// the stream is canceled.
return err
}
- // Note that the first op in the replay buffer always sets cs.attempt
+ // Note that the first op in replayBuffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
@@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
// already be status errors.
return toRPCErr(op(cs.attempt))
}
- if len(cs.buffer) == 0 {
+ if len(cs.replayBuffer) == 0 {
// For the first op, which controls creation of the stream and
// assigns cs.attempt, we need to create a new attempt inline
// before executing the first op. On subsequent ops, the attempt
@@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
- for _, f := range cs.buffer {
- if err := f(attempt); err != nil {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we still will buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
+ cleanup()
return
}
- cs.buffer = append(cs.buffer, op)
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
func (cs *clientStream) SendMsg(m any) (err error) {
@@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if payloadLen > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
+
+ // always take an extra ref in case data == payload (i.e. when the data isn't
+ // compressed). The original ref will always be freed by the deferred free above.
+ payload.Ref()
op := func(a *csAttempt) error {
- return a.sendMsg(m, hdr, payload, data)
+ return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+ }
+
+ // onSuccess is invoked when the op is captured for a subsequent retry. If the
+ // stream was established by a previous message and therefore retries are
+ // disabled, onSuccess will not be invoked, and payloadRef can be freed
+ // immediately.
+ onSuccessCalled := false
+ err = cs.withRetry(op, func() {
+ cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+ onSuccessCalled = true
+ })
+ if !onSuccessCalled {
+ payload.Free()
}
- err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, cm)
@@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error {
var recvInfo *payloadInfo
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
+ defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
@@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
- Message: recvInfo.uncompressedBytes,
+ Message: recvInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, sm)
@@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error {
// RecvMsg. This also matches historical behavior.
return nil
}
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
@@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
-func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
return io.EOF
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+ if len(a.statsHandlers) != 0 {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+ }
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
if !a.decompSet {
@@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
- if err != nil {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
if err == io.EOF {
if statusErr := a.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Length: len(payInfo.uncompressedBytes),
+ Length: payInfo.uncompressedBytes.Len(),
})
}
if channelz.IsOn() {
@@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (a *csAttempt) finish(err error) {
@@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
- as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+ as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompSet = true
}
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
if err == io.EOF {
if statusErr := as.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (as *addrConnStream) finish(err error) {
@@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if payloadLen > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
+
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
@@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
}
sm := &binarylog.ServerMessage{
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, sm)
@@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
}
return nil
@@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- Length: len(payInfo.uncompressedBytes),
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
@@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes,
+ Message: payInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, cm)
@@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned boolean indicates whether
+// compression was made and therefore whether the payload needs to be freed in
+// addition to the returned data. Freeing the payload if the returned boolean is
+// false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+ return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, nil, 0, err
}
- compData, err := compress(data, cp, comp)
+ compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
- return nil, nil, nil, err
+ data.Free()
+ return nil, nil, nil, 0, err
}
- hdr, payload = msgHeader(data, compData)
- return hdr, payload, data, nil
+ hdr, payload = msgHeader(data, compData, pf)
+ return hdr, data, payload, pf, nil
}
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c0cc5..0037fee0bd71a 100644
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type ServerStreamingServer[Res any] interface {
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using CloseAndRecv().
Send(*Req) error
+
+ // CloseAndRecv closes the request stream and waits for the server's
+ // response. This method must be called once and only once after sending
+ // all request messages. Any error returned is implemented by the status
+ // package.
CloseAndRecv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
type ClientStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseAndRecv on its
+ // ClientStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // SendAndClose sends a single response message to the client and closes
+ // the stream. This method must be called once and only once after all
+ // request messages have been processed. Recv should not be called after
+ // calling SendAndClose.
SendAndClose(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using Recv().
Send(*Req) error
+
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, Trailer, and
+ // CloseSend functionality. No other methods in the ClientStream should be
+ // called directly.
ClientStream
}
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type BidiStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseSend on its
+ // BidiStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
index 3f77f4876eb86..e6eb4feebb99f 100644
--- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
+++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
@@ -109,7 +109,7 @@ type pipe struct {
mu sync.Mutex
// buf contains the data in the pipe. It is a ring buffer of fixed capacity,
- // with r and w pointing to the offset to read and write, respsectively.
+ // with r and w pointing to the offset to read and write, respectively.
//
// Data is read between [r, w) and written to [w, r), wrapping around the end
// of the slice if necessary.
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bafaef99be989..a96b6a6bff8e2 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.65.0"
+const Version = "1.67.1"
diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go
index cb022b45de186..578e1278970d4 100644
--- a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go
+++ b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go
@@ -58,7 +58,7 @@ func (t *tlsCredsBuilder) Name() string {
}
// googleDefaultCredsBuilder implements the `Credentials` interface defined in
-// package `xds/boostrap` and encapsulates a Google Default credential.
+// package `xds/bootstrap` and encapsulates a Google Default credential.
type googleDefaultCredsBuilder struct{}
func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) {
diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go
index 6266f60e86d94..3d8398a72ff09 100644
--- a/vendor/google.golang.org/grpc/xds/csds/csds.go
+++ b/vendor/google.golang.org/grpc/xds/csds/csds.go
@@ -27,7 +27,6 @@ import (
"context"
"fmt"
"io"
- "sync"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
@@ -55,22 +54,14 @@ func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger
// https://github.com/grpc/proposal/blob/master/A40-csds-support.md.
type ClientStatusDiscoveryServer struct {
logger *internalgrpclog.PrefixLogger
-
- mu sync.Mutex
- xdsClient xdsclient.XDSClient
- xdsClientClose func()
}
// NewClientStatusDiscoveryServer returns an implementation of the CSDS server
// that can be registered on a gRPC server.
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) {
- c, close, err := xdsclient.New()
- if err != nil {
- logger.Warningf("Failed to create xDS client: %v", err)
- }
- s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close}
+ s := &ClientStatusDiscoveryServer{}
s.logger = prefixLogger(s)
- s.logger.Infof("Created CSDS server, with xdsClient %p", c)
+ s.logger.Infof("Created CSDS server")
return s, nil
}
@@ -104,24 +95,14 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req *
//
// If it returns an error, the error is a status error.
func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.xdsClient == nil {
- return &v3statuspb.ClientStatusResponse{}, nil
- }
// Field NodeMatchers is unsupported, by design
// https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching.
if len(req.NodeMatchers) != 0 {
return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers)
}
- return s.xdsClient.DumpResources()
+ return xdsclient.DumpResources(), nil
}
// Close cleans up the resources.
-func (s *ClientStatusDiscoveryServer) Close() {
- if s.xdsClientClose != nil {
- s.xdsClientClose()
- }
-}
+func (s *ClientStatusDiscoveryServer) Close() {}
diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
index 6ab7fb03f2dcf..936bf2da32742 100644
--- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
+++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
@@ -26,6 +26,7 @@
package googledirectpath
import (
+ "encoding/json"
"fmt"
"math/rand"
"net/url"
@@ -37,7 +38,6 @@ import (
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/xds/bootstrap"
"google.golang.org/grpc/resolver"
- "google.golang.org/grpc/xds/internal/xdsclient"
_ "google.golang.org/grpc/xds" // To register xds resolvers and balancers.
)
@@ -46,30 +46,21 @@ const (
c2pScheme = "google-c2p"
c2pAuthority = "traffic-director-c2p.xds.googleapis.com"
- tdURL = "dns:///directpath-pa.googleapis.com"
- httpReqTimeout = 10 * time.Second
- zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone"
- ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s"
-
- gRPCUserAgentName = "gRPC Go"
- clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
- clientFeatureResourceWrapper = "xds.config.resource-in-sotw"
- ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE"
-
- logPrefix = "[google-c2p-resolver]"
+ tdURL = "dns:///directpath-pa.googleapis.com"
+ zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone"
+ ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s"
+ ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE"
+ httpReqTimeout = 10 * time.Second
+ logPrefix = "[google-c2p-resolver]"
dnsName, xdsName = "dns", "xds"
)
// For overriding in unittests.
var (
- onGCE = googlecloud.OnGCE
-
- newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) {
- return xdsclient.NewWithConfig(config)
- }
-
- logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix)
+ onGCE = googlecloud.OnGCE
+ randInt = rand.Int
+ logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix)
)
func init() {
@@ -108,23 +99,18 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
xdsServerCfg := newXdsServerConfig(xdsServerURI)
authoritiesCfg := newAuthoritiesConfig(xdsServerCfg)
- config, err := bootstrap.NewConfigFromContents([]byte(fmt.Sprintf(`
- {
- "xds_servers": [%s],
- "client_default_listener_resource_name_template": "%%s",
- "authorities": %s,
- "node": %s
- }`, xdsServerCfg, authoritiesCfg, nodeCfg)))
-
- if err != nil {
- return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err)
+ cfg := map[string]any{
+ "xds_servers": []any{xdsServerCfg},
+ "client_default_listener_resource_name_template": "%s",
+ "authorities": authoritiesCfg,
+ "node": nodeCfg,
}
-
- // Create singleton xds client with this config. The xds client will be
- // used by the xds resolver later.
- _, close, err := newClientWithConfig(config)
+ cfgJSON, err := json.Marshal(cfg)
if err != nil {
- return nil, fmt.Errorf("failed to start xDS client: %v", err)
+ return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err)
+ }
+ if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil {
+ return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err)
}
t = resolver.Target{
@@ -134,66 +120,36 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
Path: t.URL.Path,
},
}
- xdsR, err := resolver.Get(xdsName).Build(t, cc, opts)
- if err != nil {
- close()
- return nil, err
- }
- return &c2pResolver{
- Resolver: xdsR,
- clientCloseFunc: close,
- }, nil
+ return resolver.Get(xdsName).Build(t, cc, opts)
}
func (b c2pResolverBuilder) Scheme() string {
return c2pScheme
}
-type c2pResolver struct {
- resolver.Resolver
- clientCloseFunc func()
-}
-
-func (r *c2pResolver) Close() {
- r.Resolver.Close()
- r.clientCloseFunc()
-}
-
-var id = fmt.Sprintf("C2P-%d", rand.Int())
-
-func newNodeConfig(zone string, ipv6Capable bool) string {
- metadata := ""
+func newNodeConfig(zone string, ipv6Capable bool) map[string]any {
+ node := map[string]any{
+ "id": fmt.Sprintf("C2P-%d", randInt()),
+ "locality": map[string]any{"zone": zone},
+ }
if ipv6Capable {
- metadata = fmt.Sprintf(`, "metadata": { "%s": true }`, ipv6CapableMetadataName)
+ node["metadata"] = map[string]any{ipv6CapableMetadataName: true}
}
-
- return fmt.Sprintf(`
- {
- "id": "%s",
- "locality": {
- "zone": "%s"
- }
- %s
- }`, id, zone, metadata)
+ return node
}
-func newAuthoritiesConfig(xdsServer string) string {
- return fmt.Sprintf(`
- {
- "%s": {
- "xds_servers": [%s]
- }
+func newAuthoritiesConfig(serverCfg map[string]any) map[string]any {
+ return map[string]any{
+ c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}},
}
- `, c2pAuthority, xdsServer)
}
-func newXdsServerConfig(xdsServerURI string) string {
- return fmt.Sprintf(`
- {
- "server_uri": "%s",
- "channel_creds": [{"type": "google_default"}],
- "server_features": ["xds_v3", "ignore_resource_deletion", "xds.config.resource-in-sotw"]
- }`, xdsServerURI)
+func newXdsServerConfig(uri string) map[string]any {
+ return map[string]any{
+ "server_uri": uri,
+ "channel_creds": []map[string]any{{"type": "google_default"}},
+ "server_features": []any{"ignore_resource_deletion"},
+ }
}
// runDirectPath returns whether this resolver should use direct path.
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
index 8e97e104ed4b1..9a112e276977d 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
@@ -207,7 +207,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e
}
// A root provider is required whether we are using TLS or mTLS.
- cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs
+ cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs()
rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true)
if err != nil {
return err
@@ -309,8 +309,8 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro
b.lbCfg = lbCfg
// Handle the update in a blocking fashion.
- done := make(chan struct{})
- ok = b.serializer.Schedule(func(context.Context) {
+ errCh := make(chan error, 1)
+ callback := func(context.Context) {
// A config update with a changed top-level cluster name means that none
// of our old watchers make any sense any more.
b.closeAllWatchers()
@@ -319,20 +319,20 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro
// could end up creating more watchers if turns out to be an aggregate
// cluster.
b.createAndAddWatcherForCluster(lbCfg.ClusterName)
- close(done)
- })
- if !ok {
+ errCh <- nil
+ }
+ onFailure := func() {
// The call to Schedule returns false *only* if the serializer has been
// closed, which happens only when we receive an update after close.
- return errBalancerClosed
+ errCh <- errBalancerClosed
}
- <-done
- return nil
+ b.serializer.ScheduleOr(callback, onFailure)
+ return <-errCh
}
// ResolverError handles errors reported by the xdsResolver.
func (b *cdsBalancer) ResolverError(err error) {
- b.serializer.Schedule(func(context.Context) {
+ b.serializer.TrySchedule(func(context.Context) {
// Resource not found error is reported by the resolver when the
// top-level cluster resource is removed by the management server.
if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound {
@@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}
-// Closes all registered cluster wathers and removes them from the internal map.
+// Closes all registered cluster watchers and removes them from the internal map.
//
// Only executed in the context of a serializer callback.
func (b *cdsBalancer) closeAllWatchers() {
@@ -364,7 +364,7 @@ func (b *cdsBalancer) closeAllWatchers() {
// Close cancels the CDS watch, closes the child policy and closes the
// cdsBalancer.
func (b *cdsBalancer) Close() {
- b.serializer.Schedule(func(ctx context.Context) {
+ b.serializer.TrySchedule(func(context.Context) {
b.closeAllWatchers()
if b.childLB != nil {
@@ -384,7 +384,7 @@ func (b *cdsBalancer) Close() {
}
func (b *cdsBalancer) ExitIdle() {
- b.serializer.Schedule(func(context.Context) {
+ b.serializer.TrySchedule(func(context.Context) {
if b.childLB == nil {
b.logger.Warningf("Received ExitIdle with no child policy")
return
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go
index 0b0d168376d74..835461d0997bc 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go
@@ -32,22 +32,19 @@ type clusterWatcher struct {
parent *cdsBalancer
}
-func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) {
- cw.parent.serializer.Schedule(func(context.Context) {
- cw.parent.onClusterUpdate(cw.name, u.Resource)
- })
+func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) {
+ handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone() }
+ cw.parent.serializer.ScheduleOr(handleUpdate, onDone)
}
-func (cw *clusterWatcher) OnError(err error) {
- cw.parent.serializer.Schedule(func(context.Context) {
- cw.parent.onClusterError(cw.name, err)
- })
+func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+ handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone() }
+ cw.parent.serializer.ScheduleOr(handleError, onDone)
}
-func (cw *clusterWatcher) OnResourceDoesNotExist() {
- cw.parent.serializer.Schedule(func(context.Context) {
- cw.parent.onClusterResourceNotFound(cw.name)
- })
+func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+ handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone() }
+ cw.parent.serializer.ScheduleOr(handleNotFound, onDone)
}
// watcherState groups the state associated with a clusterWatcher.
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
index 164f3099d2805..0dc71dfedebc5 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
@@ -24,6 +24,7 @@
package clusterimpl
import (
+ "context"
"encoding/json"
"fmt"
"sync"
@@ -31,8 +32,8 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
@@ -52,6 +53,11 @@ const (
defaultRequestCountMax = 1024
)
+var (
+ connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address)
+ errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name)
+)
+
func init() {
balancer.Register(bb{})
}
@@ -59,18 +65,17 @@ func init() {
type bb struct{}
func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
+ ctx, cancel := context.WithCancel(context.Background())
b := &clusterImplBalancer{
- ClientConn: cc,
- bOpts: bOpts,
- closed: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- loadWrapper: loadstore.NewWrapper(),
- pickerUpdateCh: buffer.NewUnbounded(),
- requestCountMax: defaultRequestCountMax,
+ ClientConn: cc,
+ bOpts: bOpts,
+ loadWrapper: loadstore.NewWrapper(),
+ requestCountMax: defaultRequestCountMax,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
}
b.logger = prefixLogger(b)
b.child = gracefulswitch.NewBalancer(b, bOpts)
- go b.run()
b.logger.Infof("Created")
return b
}
@@ -86,18 +91,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err
type clusterImplBalancer struct {
balancer.ClientConn
- // mu guarantees mutual exclusion between Close() and handling of picker
- // update to the parent ClientConn in run(). It's to make sure that the
- // run() goroutine doesn't send picker update to parent after the balancer
- // is closed.
- //
- // It's only used by the run() goroutine, but not the other exported
- // functions. Because the exported functions are guaranteed to be
- // synchronized with Close().
- mu sync.Mutex
- closed *grpcsync.Event
- done *grpcsync.Event
-
bOpts balancer.BuildOptions
logger *grpclog.PrefixLogger
xdsClient xdsclient.XDSClient
@@ -112,10 +105,11 @@ type clusterImplBalancer struct {
clusterNameMu sync.Mutex
clusterName string
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
+
// childState/drops/requestCounter keeps the state used by the most recently
- // generated picker. All fields can only be accessed in run(). And run() is
- // the only goroutine that sends picker to the parent ClientConn. All
- // requests to update picker need to be sent to pickerUpdateCh.
+ // generated picker.
childState balancer.State
dropCategories []DropConfig // The categories for drops.
drops []*dropper
@@ -124,7 +118,6 @@ type clusterImplBalancer struct {
requestCounter *xdsclient.ClusterRequestsCounter
requestCountMax uint32
telemetryLabels map[string]string
- pickerUpdateCh *buffer.Unbounded
}
// updateLoadStore checks the config for load store, and decides whether it
@@ -205,14 +198,9 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error {
return nil
}
-func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
- if b.closed.HasFired() {
- b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s)
- return nil
- }
-
+func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error {
if b.logger.V(2) {
- b.logger.Infof("Received update from resolver, balancer config: %s", pretty.ToJSON(s.BalancerConfig))
+ b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig))
}
newConfig, ok := s.BalancerConfig.(*LBConfig)
if !ok {
@@ -224,7 +212,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)
// it.
bb := balancer.Get(newConfig.ChildPolicy.Name)
if bb == nil {
- return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name)
+ return fmt.Errorf("child policy %q not registered", newConfig.ChildPolicy.Name)
}
if b.xdsClient == nil {
@@ -250,9 +238,14 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)
}
b.config = newConfig
- // Notify run() of this new config, in case drop and request counter need
- // update (which means a new picker needs to be generated).
- b.pickerUpdateCh.Put(newConfig)
+ b.telemetryLabels = newConfig.TelemetryLabels
+ dc := b.handleDropAndRequestCount(newConfig)
+ if dc != nil && b.childState.Picker != nil {
+ b.ClientConn.UpdateState(balancer.State{
+ ConnectivityState: b.childState.ConnectivityState,
+ Picker: b.newPicker(dc),
+ })
+ }
// Addresses and sub-balancer config are sent to sub-balancer.
return b.child.UpdateClientConnState(balancer.ClientConnState{
@@ -261,20 +254,28 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)
})
}
-func (b *clusterImplBalancer) ResolverError(err error) {
- if b.closed.HasFired() {
- b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err)
- return
+func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+ // Handle the update in a blocking fashion.
+ errCh := make(chan error, 1)
+ callback := func(context.Context) {
+ errCh <- b.updateClientConnState(s)
+ }
+ onFailure := func() {
+ // An attempt to schedule callback fails only when an update is received
+ // after Close().
+ errCh <- errBalancerClosed
}
- b.child.ResolverError(err)
+ b.serializer.ScheduleOr(callback, onFailure)
+ return <-errCh
}
-func (b *clusterImplBalancer) updateSubConnState(sc balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) {
- if b.closed.HasFired() {
- b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s)
- return
- }
+func (b *clusterImplBalancer) ResolverError(err error) {
+ b.serializer.TrySchedule(func(context.Context) {
+ b.child.ResolverError(err)
+ })
+}
+func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) {
// Trigger re-resolution when a SubConn turns transient failure. This is
// necessary for the LogicalDNS in cluster_resolver policy to re-resolve.
//
@@ -296,26 +297,40 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer
}
func (b *clusterImplBalancer) Close() {
- b.mu.Lock()
- b.closed.Fire()
- b.mu.Unlock()
-
- b.child.Close()
- b.childState = balancer.State{}
- b.pickerUpdateCh.Close()
- <-b.done.Done()
- b.logger.Infof("Shutdown")
+ b.serializer.TrySchedule(func(_ context.Context) {
+ b.child.Close()
+ b.childState = balancer.State{}
+
+ if b.cancelLoadReport != nil {
+ b.cancelLoadReport()
+ b.cancelLoadReport = nil
+ }
+ b.logger.Infof("Shutdown")
+ })
+ b.serializerCancel()
+ <-b.serializer.Done()
}
func (b *clusterImplBalancer) ExitIdle() {
- b.child.ExitIdle()
+ b.serializer.TrySchedule(func(context.Context) {
+ b.child.ExitIdle()
+ })
}
// Override methods to accept updates from the child LB.
func (b *clusterImplBalancer) UpdateState(state balancer.State) {
- // Instead of updating parent ClientConn inline, send state to run().
- b.pickerUpdateCh.Put(state)
+ b.serializer.TrySchedule(func(context.Context) {
+ b.childState = state
+ b.ClientConn.UpdateState(balancer.State{
+ ConnectivityState: b.childState.ConnectivityState,
+ Picker: b.newPicker(&dropConfigs{
+ drops: b.drops,
+ requestCounter: b.requestCounter,
+ requestCountMax: b.requestCountMax,
+ }),
+ })
+ })
}
func (b *clusterImplBalancer) setClusterName(n string) {
@@ -360,22 +375,37 @@ func (scw *scWrapper) localityID() xdsinternal.LocalityID {
func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
clusterName := b.getClusterName()
newAddrs := make([]resolver.Address, len(addrs))
- var lID xdsinternal.LocalityID
for i, addr := range addrs {
newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName)
- lID = xdsinternal.GetLocalityID(newAddrs[i])
}
var sc balancer.SubConn
+ scw := &scWrapper{}
oldListener := opts.StateListener
- opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state, oldListener) }
+ opts.StateListener = func(state balancer.SubConnState) {
+ b.serializer.TrySchedule(func(context.Context) {
+ b.updateSubConnState(sc, state, oldListener)
+ if state.ConnectivityState != connectivity.Ready {
+ return
+ }
+ // Read connected address and call updateLocalityID() based on the connected
+ // address's locality. https://github.com/grpc/grpc-go/issues/7339
+ addr := connectedAddress(state)
+ lID := xdsinternal.GetLocalityID(addr)
+ if lID.Empty() {
+ if b.logger.V(2) {
+ b.logger.Infof("Locality ID for %s unexpectedly empty", addr)
+ }
+ return
+ }
+ scw.updateLocalityID(lID)
+ })
+ }
sc, err := b.ClientConn.NewSubConn(newAddrs, opts)
if err != nil {
return nil, err
}
- // Wrap this SubConn in a wrapper, and add it to the map.
- ret := &scWrapper{SubConn: sc}
- ret.updateLocalityID(lID)
- return ret, nil
+ scw.SubConn = sc
+ return scw, nil
}
func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) {
@@ -448,49 +478,3 @@ func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dr
requestCountMax: b.requestCountMax,
}
}
-
-func (b *clusterImplBalancer) run() {
- defer b.done.Fire()
- for {
- select {
- case update, ok := <-b.pickerUpdateCh.Get():
- if !ok {
- return
- }
- b.pickerUpdateCh.Load()
- b.mu.Lock()
- if b.closed.HasFired() {
- b.mu.Unlock()
- return
- }
- switch u := update.(type) {
- case balancer.State:
- b.childState = u
- b.ClientConn.UpdateState(balancer.State{
- ConnectivityState: b.childState.ConnectivityState,
- Picker: b.newPicker(&dropConfigs{
- drops: b.drops,
- requestCounter: b.requestCounter,
- requestCountMax: b.requestCountMax,
- }),
- })
- case *LBConfig:
- b.telemetryLabels = u.TelemetryLabels
- dc := b.handleDropAndRequestCount(u)
- if dc != nil && b.childState.Picker != nil {
- b.ClientConn.UpdateState(balancer.State{
- ConnectivityState: b.childState.ConnectivityState,
- Picker: b.newPicker(dc),
- })
- }
- }
- b.mu.Unlock()
- case <-b.closed.Done():
- if b.cancelLoadReport != nil {
- b.cancelLoadReport()
- b.cancelLoadReport = nil
- }
- return
- }
- }
-}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
index d8cb8df1a81c7..fbadbb92ba39c 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
@@ -19,6 +19,8 @@
package clusterimpl
import (
+ "context"
+
v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
@@ -96,14 +98,23 @@ func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker {
}
}
+func telemetryLabels(ctx context.Context) map[string]string {
+ if ctx == nil {
+ return nil
+ }
+ labels := stats.GetLabels(ctx)
+ if labels == nil {
+ return nil
+ }
+ return labels.TelemetryLabels
+}
+
func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
// Unconditionally set labels if present, even dropped or queued RPC's can
// use these labels.
- if info.Ctx != nil {
- if labels := stats.GetLabels(info.Ctx); labels != nil && labels.TelemetryLabels != nil {
- for key, value := range d.telemetryLabels {
- labels.TelemetryLabels[key] = value
- }
+ if labels := telemetryLabels(info.Ctx); labels != nil {
+ for key, value := range d.telemetryLabels {
+ labels[key] = value
}
}
@@ -156,6 +167,10 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
return pr, err
}
+ if labels := telemetryLabels(info.Ctx); labels != nil {
+ labels["grpc.lb.locality"] = lIDStr
+ }
+
if d.loadStore != nil {
d.loadStore.CallStarted(lIDStr)
oldDone := pr.Done
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
index 83ead92a4a697..749945059b88d 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
@@ -207,11 +207,6 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) {
// handleResourceUpdate handles a resource update or error from the resource
// resolver by propagating the same to the child LB policy.
func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) {
- if err := update.err; err != nil {
- b.handleErrorFromUpdate(err, false)
- return
- }
-
b.watchUpdateReceived = true
b.priorities = update.priorities
@@ -219,6 +214,10 @@ func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) {
// for all configured discovery mechanisms ordered by priority. This is used
// to generate configuration for the priority LB policy.
b.updateChildConfig()
+
+ if update.onDone != nil {
+ update.onDone()
+ }
}
// updateChildConfig builds child policy configuration using endpoint addresses
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
index 151c54dae6d09..5bc64b86305c6 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
@@ -30,8 +30,14 @@ import (
// resourceUpdate is a combined update from all the resources, in the order of
// priority. For example, it can be {EDS, EDS, DNS}.
type resourceUpdate struct {
+ // A discovery mechanism would return an empty update when it runs into
+ // errors, and this would result in the priority LB policy reporting
+ // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would
+ // fallback to the next highest priority that is available.
priorities []priorityConfig
- err error
+ // To be invoked once the update is completely processed, or is dropped in
+ // favor of a newer update.
+ onDone xdsresource.OnDoneFunc
}
// topLevelResolver is used by concrete endpointsResolver implementations for
@@ -39,7 +45,11 @@ type resourceUpdate struct {
// interface and takes appropriate actions upon receipt of updates and errors
// from underlying concrete resolvers.
type topLevelResolver interface {
- onUpdate()
+ // onUpdate is called when a new update is received from the underlying
+ // endpointsResolver implementation. The onDone callback is to be invoked
+ // once the update is completely processed, or is dropped in favor of a
+ // newer update.
+ onUpdate(onDone xdsresource.OnDoneFunc)
}
// endpointsResolver wraps the functionality to resolve a given resource name to
@@ -205,7 +215,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) {
}
// Regenerate even if there's no change in discovery mechanism, in case
// priority order changed.
- rr.generateLocked()
+ rr.generateLocked(func() {})
}
// resolveNow is typically called to trigger re-resolve of DNS. The EDS
@@ -252,7 +262,10 @@ func (rr *resourceResolver) stop(closing bool) {
// after they are stopped. Therefore, we don't have to worry about another
// write to this channel happening at the same time as this one.
select {
- case <-rr.updateChannel:
+ case ru := <-rr.updateChannel:
+ if ru.onDone != nil {
+ ru.onDone()
+ }
default:
}
rr.updateChannel <- &resourceUpdate{}
@@ -262,14 +275,20 @@ func (rr *resourceResolver) stop(closing bool) {
// result on the update channel if all child resolvers have received at least
// one update. Otherwise it returns early.
//
-// caller must hold rr.mu.
-func (rr *resourceResolver) generateLocked() {
+// The onDone callback is invoked inline if not all child resolvers have
+// received at least one update. If all child resolvers have received at least
+// one update, onDone is invoked when the combined update is processed by the
+// clusterresolver LB policy.
+//
+// Caller must hold rr.mu.
+func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) {
var ret []priorityConfig
for _, rDM := range rr.children {
u, ok := rDM.r.lastUpdate()
if !ok {
// Don't send updates to parent until all resolvers have update to
// send.
+ onDone()
return
}
switch uu := u.(type) {
@@ -280,16 +299,23 @@ func (rr *resourceResolver) generateLocked() {
}
}
select {
- case <-rr.updateChannel:
+ // A previously unprocessed update is dropped in favor of the new one, and
+ // the former's onDone callback is invoked to unblock the xDS client's
+ // receive path.
+ case ru := <-rr.updateChannel:
+ if ru.onDone != nil {
+ ru.onDone()
+ }
default:
}
- rr.updateChannel <- &resourceUpdate{priorities: ret}
+ rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone}
}
-func (rr *resourceResolver) onUpdate() {
- rr.serializer.Schedule(func(context.Context) {
+func (rr *resourceResolver) onUpdate(onDone xdsresource.OnDoneFunc) {
+ handleUpdate := func(context.Context) {
rr.mu.Lock()
- rr.generateLocked()
+ rr.generateLocked(onDone)
rr.mu.Unlock()
- })
+ }
+ rr.serializer.ScheduleOr(handleUpdate, func() { onDone() })
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
index efdc3088a395c..cfc871d3b59d6 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
@@ -79,7 +79,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr
ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target)
}
ret.updateReceived = true
- ret.topLevelResolver.onUpdate()
+ ret.topLevelResolver.onUpdate(func() {})
return ret
}
@@ -89,7 +89,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr
ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err)
}
ret.updateReceived = true
- ret.topLevelResolver.onUpdate()
+ ret.topLevelResolver.onUpdate(func() {})
return ret
}
ret.dnsR = r
@@ -153,7 +153,7 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error {
dr.updateReceived = true
dr.mu.Unlock()
- dr.topLevelResolver.onUpdate()
+ dr.topLevelResolver.onUpdate(func() {})
return nil
}
@@ -176,7 +176,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) {
dr.updateReceived = true
dr.mu.Unlock()
- dr.topLevelResolver.onUpdate()
+ dr.topLevelResolver.onUpdate(func() {})
}
func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) {
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go
index 3d0ec356e93a9..ddb949019ee5b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go
@@ -76,8 +76,9 @@ func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelR
}
// OnUpdate is invoked to report an update for the resource being watched.
-func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) {
+func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.OnDoneFunc) {
if er.stopped.HasFired() {
+ onDone()
return
}
@@ -85,11 +86,12 @@ func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceD
er.update = &update.Resource
er.mu.Unlock()
- er.topLevelResolver.onUpdate()
+ er.topLevelResolver.onUpdate(onDone)
}
-func (er *edsDiscoveryMechanism) OnError(err error) {
+func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.OnDoneFunc) {
if er.stopped.HasFired() {
+ onDone()
return
}
@@ -102,6 +104,7 @@ func (er *edsDiscoveryMechanism) OnError(err error) {
// Continue using a previously received good configuration if one
// exists.
er.mu.Unlock()
+ onDone()
return
}
@@ -114,11 +117,12 @@ func (er *edsDiscoveryMechanism) OnError(err error) {
er.update = &xdsresource.EndpointsUpdate{}
er.mu.Unlock()
- er.topLevelResolver.onUpdate()
+ er.topLevelResolver.onUpdate(onDone)
}
-func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() {
+func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
if er.stopped.HasFired() {
+ onDone()
return
}
@@ -136,5 +140,5 @@ func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() {
er.update = &xdsresource.EndpointsUpdate{}
er.mu.Unlock()
- er.topLevelResolver.onUpdate()
+ er.topLevelResolver.onUpdate(onDone)
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go
index 8ce958d71ca86..f5605df83276b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go
@@ -36,7 +36,7 @@ func NewWrapper() *Wrapper {
// update its internal perCluster store so that new stats will be added to the
// correct perCluster.
//
-// Note that this struct is a temporary walkaround before we implement graceful
+// Note that this struct is a temporary workaround before we implement graceful
// switch for EDS. Any update to the clusterName and serviceName is too early,
// the perfect timing is when the picker is updated with the new connection.
// This early update could cause picks for the old SubConn being reported to the
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
index 80d3d444697ed..53ba72c0813f3 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
@@ -592,20 +592,6 @@ func (b *outlierDetectionBalancer) Target() string {
return b.cc.Target()
}
-func max(x, y time.Duration) time.Duration {
- if x < y {
- return y
- }
- return x
-}
-
-func min(x, y time.Duration) time.Duration {
- if x < y {
- return x
- }
- return y
-}
-
// handleSubConnUpdate stores the recent state and forward the update
// if the SubConn is not ejected.
func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) {
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go
index 988ca280789ed..c17c62f23a597 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go
@@ -270,6 +270,7 @@ func (b *priorityBalancer) run() {
// deadlock.
b.mu.Lock()
if b.done.HasFired() {
+ b.mu.Unlock()
return
}
switch s := u.(type) {
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go
index 4655bf418474e..0be807c134a16 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go
@@ -83,7 +83,9 @@ var (
// Caller must hold b.mu.
func (b *priorityBalancer) syncPriority(childUpdating string) {
if b.inhibitPickerUpdates {
- b.logger.Debugf("Skipping update from child policy %q", childUpdating)
+ if b.logger.V(2) {
+ b.logger.Infof("Skipping update from child policy %q", childUpdating)
+ }
return
}
for p, name := range b.priorities {
@@ -99,12 +101,16 @@ func (b *priorityBalancer) syncPriority(childUpdating string) {
(child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) ||
p == len(b.priorities)-1 {
if b.childInUse != child.name || child.name == childUpdating {
- b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name)
+ if b.logger.V(2) {
+ b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name)
+ }
// If we switch children or the child in use just updated its
// picker, push the child's picker to the parent.
b.cc.UpdateState(child.state)
}
- b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p)
+ if b.logger.V(2) {
+ b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p)
+ }
b.switchToChild(child, p)
break
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go
index b450716fa0f05..5ce72caded482 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go
@@ -159,28 +159,3 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry {
// There's no qualifying next entry.
return nil
}
-
-// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's
-// different from the given subconn.
-func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn {
- var entry *ringEntry
- for _, it := range ring.items {
- if it.sc == sc {
- entry = it
- break
- }
- }
- if entry == nil {
- // If the given subconn is not in the ring (e.g. it was deleted), return
- // the first one.
- if len(ring.items) > 0 {
- return ring.items[0].sc
- }
- return nil
- }
- ee := nextSkippingDuplicates(ring, entry)
- if ee == nil {
- return nil
- }
- return ee.sc
-}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go
index eac89b5b4d05a..45dbb2d2a83f8 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go
@@ -67,11 +67,15 @@ type ringEntry struct {
//
// Must be called with a non-empty subConns map.
func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring {
- logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize)
+ if logger.V(2) {
+ logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize)
+ }
// https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114
normalizedWeights, minWeight := normalizeWeights(subConns)
- logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights)
+ if logger.V(2) {
+ logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights)
+ }
// Normalized weights for {3,3,4} is {0.3,0.3,0.4}.
@@ -82,7 +86,9 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log
scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize))
ringSize := math.Ceil(scale)
items := make([]*ringEntry, 0, int(ringSize))
- logger.Debugf("newRing: creating new ring of size %v", ringSize)
+ if logger.V(2) {
+ logger.Infof("newRing: creating new ring of size %v", ringSize)
+ }
// For each entry, scale*weight nodes are generated in the ring.
//
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
index e63c6f653904a..ef054d48aa4e7 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
@@ -44,12 +44,13 @@ func init() {
type bb struct{}
-func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
+func (bb) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &ringhashBalancer{
- cc: cc,
- subConns: resolver.NewAddressMap(),
- scStates: make(map[balancer.SubConn]*subConn),
- csEvltr: &connectivityStateEvaluator{},
+ cc: cc,
+ subConns: resolver.NewAddressMap(),
+ scStates: make(map[balancer.SubConn]*subConn),
+ csEvltr: &connectivityStateEvaluator{},
+ orderedSubConns: make([]*subConn, 0),
}
b.logger = prefixLogger(b)
b.logger.Infof("Created")
@@ -197,6 +198,14 @@ type ringhashBalancer struct {
resolverErr error // the last error reported by the resolver; cleared on successful resolution
connErr error // the last connection error; cleared upon leaving TransientFailure
+
+ // orderedSubConns contains the list of subconns in the order that addresses
+ // appear from the resolver. Together with lastInternallyTriggeredSCIndex,
+ // this allows triggering connection attempts to all SubConns independently
+ // of the order they appear on the ring. Always in sync with ring and
+ // subConns. The index is reset when addresses change.
+ orderedSubConns []*subConn
+ lastInternallyTriggeredSCIndex int
}
// updateAddresses creates new SubConns and removes SubConns, based on the
@@ -214,6 +223,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
var addrsUpdated bool
// addrsSet is the set converted from addrs, used for quick lookup.
addrsSet := resolver.NewAddressMap()
+
+ b.orderedSubConns = b.orderedSubConns[:0] // reuse the underlying array.
+
for _, addr := range addrs {
addrsSet.Set(addr, true)
newWeight := getWeightAttribute(addr)
@@ -234,6 +246,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle)
b.subConns.Set(addr, scs)
b.scStates[sc] = scs
+ b.orderedSubConns = append(b.orderedSubConns, scs)
addrsUpdated = true
} else {
// We have seen this address before and created a subConn for it. If the
@@ -244,6 +257,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
// since *only* the weight attribute has changed, and that does not affect
// subConn uniqueness.
scInfo := val.(*subConn)
+ b.orderedSubConns = append(b.orderedSubConns, scInfo)
if oldWeight := scInfo.weight; oldWeight != newWeight {
scInfo.weight = newWeight
b.subConns.Set(addr, scInfo)
@@ -264,6 +278,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
// The entry will be deleted in updateSubConnState.
}
}
+ if addrsUpdated {
+ b.lastInternallyTriggeredSCIndex = 0
+ }
return addrsUpdated
}
@@ -399,19 +416,11 @@ func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balance
return
}
}
- // Trigger a SubConn (this updated SubConn's next SubConn in the ring)
- // to connect if nobody is attempting to connect.
- sc := nextSkippingDuplicatesSubConn(b.ring, scs)
- if sc != nil {
- sc.queueConnect()
- return
- }
- // This handles the edge case where we have a single subConn in the
- // ring. nextSkippingDuplicatesSubCon() would have returned nil. We
- // still need to ensure that some subConn is attempting to connect, in
- // order to give the LB policy a chance to move out of
- // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn.
- scs.queueConnect()
+
+ // Trigger a SubConn (the next in the order addresses appear in the
+ // resolver) to connect if nobody is attempting to connect.
+ b.lastInternallyTriggeredSCIndex = (b.lastInternallyTriggeredSCIndex + 1) % len(b.orderedSubConns)
+ b.orderedSubConns[b.lastInternallyTriggeredSCIndex].queueConnect()
}
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go
index 74abfec1fa88e..89837605c1d19 100644
--- a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go
+++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go
@@ -65,13 +65,13 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala
if cfg == nil {
return nil, fmt.Errorf("rls_csp: nil configuration message provided")
}
- any, ok := cfg.(*anypb.Any)
+ m, ok := cfg.(*anypb.Any)
if !ok {
return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg)
}
rlcs := new(rlspb.RouteLookupClusterSpecifier)
- if err := any.UnmarshalTo(rlcs); err != nil {
+ if err := m.UnmarshalTo(rlcs); err != nil {
return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err)
}
rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig())
diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go
index 90155d80be325..5a82490598a34 100644
--- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go
+++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go
@@ -81,12 +81,12 @@ func parseConfig(cfg proto.Message) (httpfilter.FilterConfig, error) {
if cfg == nil {
return nil, fmt.Errorf("fault: nil configuration message provided")
}
- any, ok := cfg.(*anypb.Any)
+ m, ok := cfg.(*anypb.Any)
if !ok {
return nil, fmt.Errorf("fault: error parsing config %v: unknown type %T", cfg, cfg)
}
msg := new(fpb.HTTPFault)
- if err := any.UnmarshalTo(msg); err != nil {
+ if err := m.UnmarshalTo(msg); err != nil {
return nil, fmt.Errorf("fault: error parsing config %v: %v", cfg, err)
}
return config{config: msg}, nil
@@ -139,7 +139,7 @@ type interceptor struct {
var activeFaults uint32 // global active faults; accessed atomically
-func (i *interceptor) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) {
+func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) {
if maxAF := i.config.GetMaxActiveFaults(); maxAF != nil {
defer atomic.AddUint32(&activeFaults, ^uint32(0)) // decrement counter
if af := atomic.AddUint32(&activeFaults, 1); af > maxAF.GetValue() {
@@ -296,5 +296,5 @@ func (*okStream) Header() (metadata.MD, error) { return nil, nil }
func (*okStream) Trailer() metadata.MD { return nil }
func (*okStream) CloseSend() error { return nil }
func (o *okStream) Context() context.Context { return o.ctx }
-func (*okStream) SendMsg(m any) error { return io.EOF }
-func (*okStream) RecvMsg(m any) error { return io.EOF }
+func (*okStream) SendMsg(any) error { return io.EOF }
+func (*okStream) RecvMsg(any) error { return io.EOF }
diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go
index 37de3a39b64f6..bcda2ab05fc8d 100644
--- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go
+++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go
@@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) {
// "If absent, no enforcing RBAC policy will be applied" - RBAC
// Documentation for Rules field.
// "At this time, if the RBAC.action is Action.LOG then the policy will be
- // completely ignored, as if RBAC was not configurated." - A41
+ // completely ignored, as if RBAC was not configured." - A41
if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG {
return config{}, nil
}
@@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) {
ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "")
if err != nil {
// "At this time, if the RBAC.action is Action.LOG then the policy will be
- // completely ignored, as if RBAC was not configurated." - A41
+ // completely ignored, as if RBAC was not configured." - A41
if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG {
return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err)
}
@@ -141,12 +141,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er
if cfg == nil {
return nil, fmt.Errorf("rbac: nil configuration message provided")
}
- any, ok := cfg.(*anypb.Any)
+ m, ok := cfg.(*anypb.Any)
if !ok {
return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg)
}
msg := new(rpb.RBAC)
- if err := any.UnmarshalTo(msg); err != nil {
+ if err := m.UnmarshalTo(msg); err != nil {
return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err)
}
return parseConfig(msg)
@@ -156,12 +156,12 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil
if override == nil {
return nil, fmt.Errorf("rbac: nil configuration message provided")
}
- any, ok := override.(*anypb.Any)
+ m, ok := override.(*anypb.Any)
if !ok {
return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override)
}
msg := new(rpb.RBACPerRoute)
- if err := any.UnmarshalTo(msg); err != nil {
+ if err := m.UnmarshalTo(msg); err != nil {
return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err)
}
return parseConfig(msg.Rbac)
@@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http
// "If absent, no enforcing RBAC policy will be applied" - RBAC
// Documentation for Rules field.
// "At this time, if the RBAC.action is Action.LOG then the policy will be
- // completely ignored, as if RBAC was not configurated." - A41
+ // completely ignored, as if RBAC was not configured." - A41
if c.chainEngine == nil {
return nil, nil
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go
index 1675ec86ec11c..a781523d371e1 100644
--- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go
+++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go
@@ -54,12 +54,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er
if cfg == nil {
return nil, fmt.Errorf("router: nil configuration message provided")
}
- any, ok := cfg.(*anypb.Any)
+ m, ok := cfg.(*anypb.Any)
if !ok {
return nil, fmt.Errorf("router: error parsing config %v: unknown type %T", cfg, cfg)
}
msg := new(pb.Router)
- if err := any.UnmarshalTo(msg); err != nil {
+ if err := m.UnmarshalTo(msg); err != nil {
return nil, fmt.Errorf("router: error parsing config %v: %v", cfg, err)
}
return config{}, nil
diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go
index 7091990500f97..1d8a6b03f1b3b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/internal.go
+++ b/vendor/google.golang.org/grpc/xds/internal/internal.go
@@ -55,6 +55,11 @@ func (l LocalityID) Equal(o any) bool {
return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone
}
+// Empty returns whether or not the locality ID is empty.
+func (l LocalityID) Empty() bool {
+ return l.Region == "" && l.Zone == "" && l.SubZone == ""
+}
+
// LocalityIDFromString converts a json representation of locality, into a
// LocalityID struct.
func LocalityIDFromString(s string) (ret LocalityID, _ error) {
diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go
index f505eeb4394ec..d9c23278281fc 100644
--- a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go
+++ b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go
@@ -26,5 +26,5 @@ var (
NewWRR any // func() wrr.WRR
// NewXDSClient is the function used to create a new xDS client.
- NewXDSClient any // func() (xdsclient.XDSClient, func(), error)
+ NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error)
)
diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
index f5bfc500c11a6..36776f3debdfd 100644
--- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go
@@ -182,7 +182,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
if v := atomic.AddInt32(ref, -1); v == 0 {
// This entry will be removed from activeClusters when
// producing the service config for the empty update.
- cs.r.serializer.Schedule(func(context.Context) {
+ cs.r.serializer.TrySchedule(func(context.Context) {
cs.r.onClusterRefDownToZero()
})
}
@@ -326,7 +326,7 @@ func (cs *configSelector) stop() {
// selector; we need another update to delete clusters from the config (if
// we don't have another update pending already).
if needUpdate {
- cs.r.serializer.Schedule(func(context.Context) {
+ cs.r.serializer.TrySchedule(func(context.Context) {
cs.r.onClusterRefDownToZero()
})
}
@@ -336,7 +336,7 @@ type interceptorList struct {
interceptors []iresolver.ClientInterceptor
}
-func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) {
+func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, _ func(), newStream func(ctx context.Context, _ func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) {
for i := len(il.interceptors) - 1; i >= 0; i-- {
ns := newStream
interceptor := il.interceptors[i]
diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go
index abb3c2c5acf18..0de6604484b1b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go
+++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go
@@ -36,22 +36,19 @@ func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatch
return lw
}
-func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) {
- l.parent.serializer.Schedule(func(context.Context) {
- l.parent.onListenerResourceUpdate(update.Resource)
- })
+func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) {
+ handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone() }
+ l.parent.serializer.ScheduleOr(handleUpdate, onDone)
}
-func (l *listenerWatcher) OnError(err error) {
- l.parent.serializer.Schedule(func(context.Context) {
- l.parent.onListenerResourceError(err)
- })
+func (l *listenerWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+ handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone() }
+ l.parent.serializer.ScheduleOr(handleError, onDone)
}
-func (l *listenerWatcher) OnResourceDoesNotExist() {
- l.parent.serializer.Schedule(func(context.Context) {
- l.parent.onListenerResourceNotFound()
- })
+func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+ handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone() }
+ l.parent.serializer.ScheduleOr(handleNotFound, onDone)
}
func (l *listenerWatcher) stop() {
@@ -71,22 +68,22 @@ func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfi
return rw
}
-func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) {
- r.parent.serializer.Schedule(func(context.Context) {
- r.parent.onRouteConfigResourceUpdate(r.resourceName, update.Resource)
- })
+func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) {
+ handleUpdate := func(context.Context) {
+ r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource)
+ onDone()
+ }
+ r.parent.serializer.ScheduleOr(handleUpdate, onDone)
}
-func (r *routeConfigWatcher) OnError(err error) {
- r.parent.serializer.Schedule(func(context.Context) {
- r.parent.onRouteConfigResourceError(r.resourceName, err)
- })
+func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+ handleError := func(context.Context) { r.parent.onRouteConfigResourceError(r.resourceName, err); onDone() }
+ r.parent.serializer.ScheduleOr(handleError, onDone)
}
-func (r *routeConfigWatcher) OnResourceDoesNotExist() {
- r.parent.serializer.Schedule(func(context.Context) {
- r.parent.onRouteConfigResourceNotFound(r.resourceName)
- })
+func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+ handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone() }
+ r.parent.serializer.ScheduleOr(handleNotFound, onDone)
}
func (r *routeConfigWatcher) stop() {
diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go
index 40dd972678112..b5d24e4bf214b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go
@@ -49,8 +49,8 @@ const Scheme = "xds"
// ClientConns at the same time.
func newBuilderForTesting(config []byte) (resolver.Builder, error) {
return &xdsResolverBuilder{
- newXDSClient: func() (xdsclient.XDSClient, func(), error) {
- return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: config})
+ newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) {
+ return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config})
},
}, nil
}
@@ -64,7 +64,7 @@ func init() {
}
type xdsResolverBuilder struct {
- newXDSClient func() (xdsclient.XDSClient, func(), error)
+ newXDSClient func(string) (xdsclient.XDSClient, func(), error)
}
// Build helps implement the resolver.Builder interface.
@@ -97,16 +97,16 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon
r.serializerCancel = cancel
// Initialize the xDS client.
- newXDSClient := rinternal.NewXDSClient.(func() (xdsclient.XDSClient, func(), error))
+ newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error))
if b.newXDSClient != nil {
newXDSClient = b.newXDSClient
}
- client, close, err := newXDSClient()
+ client, closeFn, err := newXDSClient(target.String())
if err != nil {
return nil, fmt.Errorf("xds: failed to create xds-client: %v", err)
}
r.xdsClient = client
- r.xdsClientClose = close
+ r.xdsClientClose = closeFn
// Determine the listener resource name and start a watcher for it.
template, err := r.sanityChecksOnBootstrapConfig(target, opts, r.xdsClient)
@@ -128,7 +128,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon
//
// Returns the listener resource name template to use. If any of the above
// validations fail, a non-nil error is returned.
-func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts resolver.BuildOptions, client xdsclient.XDSClient) (string, error) {
+func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, _ resolver.BuildOptions, client xdsclient.XDSClient) (string, error) {
bootstrapConfig := client.BootstrapConfig()
if bootstrapConfig == nil {
// This is never expected to happen after a successful xDS client
@@ -139,9 +139,13 @@ func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts
// Find the client listener template to use from the bootstrap config:
// - If authority is not set in the target, use the top level template
// - If authority is set, use the template from the authority map.
- template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate
+ template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate()
if authority := target.URL.Host; authority != "" {
- a := bootstrapConfig.Authorities[authority]
+ authorities := bootstrapConfig.Authorities()
+ if authorities == nil {
+ return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target)
+ }
+ a := authorities[authority]
if a == nil {
return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target)
}
@@ -210,7 +214,7 @@ type xdsResolver struct {
}
// ResolveNow is a no-op at this point.
-func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*xdsResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (r *xdsResolver) Close() {
// Cancel the context passed to the serializer and wait for any scheduled
diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go
index fdba769294dec..92d07e7fb6d13 100644
--- a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go
@@ -47,7 +47,7 @@ type connWrapper struct {
// The specific filter chain picked for handling this connection.
filterChain *xdsresource.FilterChain
- // A reference fo the listenerWrapper on which this connection was accepted.
+ // A reference to the listenerWrapper on which this connection was accepted.
parent *listenerWrapper
// The certificate providers created for this connection.
@@ -107,7 +107,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) {
return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil
}
- cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs
+ cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs()
// Identity provider name is mandatory on the server-side, and this is
// enforced when the resource is received at the XDSClient layer.
secCfg := c.filterChain.SecurityCfg
@@ -161,6 +161,7 @@ func (c *connWrapper) Close() error {
if c.rootProvider != nil {
c.rootProvider.Close()
}
+ c.parent.removeConn(c)
return c.Conn.Close()
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go
index 174b54c441174..09d320018aeec 100644
--- a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go
+++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go
@@ -86,6 +86,7 @@ func NewListenerWrapper(params ListenerWrapperParams) net.Listener {
xdsC: params.XDSClient,
modeCallback: params.ModeCallback,
isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(),
+ conns: make(map[*connWrapper]bool),
mode: connectivity.ServingModeNotServing,
closed: grpcsync.NewEvent(),
@@ -135,13 +136,13 @@ type listenerWrapper struct {
// mu guards access to the current serving mode and the active filter chain
// manager.
- mu sync.RWMutex
+ mu sync.Mutex
// Current serving mode.
mode connectivity.ServingMode
// Filter chain manager currently serving.
activeFilterChainManager *xdsresource.FilterChainManager
// conns accepted with configuration from activeFilterChainManager.
- conns []*connWrapper
+ conns map[*connWrapper]bool
// These fields are read/written to in the context of xDS updates, which are
// guaranteed to be emitted synchronously from the xDS Client. Thus, they do
@@ -202,17 +203,14 @@ func (l *listenerWrapper) maybeUpdateFilterChains() {
// gracefully shut down with a grace period of 10 minutes for long-lived
// RPC's, such that clients will reconnect and have the updated
// configuration apply." - A36
- var connsToClose []*connWrapper
- if l.activeFilterChainManager != nil { // If there is a filter chain manager to clean up.
- connsToClose = l.conns
- l.conns = nil
- }
+ connsToClose := l.conns
+ l.conns = make(map[*connWrapper]bool)
l.activeFilterChainManager = l.pendingFilterChainManager
l.pendingFilterChainManager = nil
l.instantiateFilterChainRoutingConfigurationsLocked()
l.mu.Unlock()
go func() {
- for _, conn := range connsToClose {
+ for conn := range connsToClose {
conn.Drain()
}
}()
@@ -304,7 +302,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr())
}
- l.mu.RLock()
+ l.mu.Lock()
if l.mode == connectivity.ServingModeNotServing {
// Close connections as soon as we accept them when we are in
// "not-serving" mode. Since we accept a net.Listener from the user
@@ -312,7 +310,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
// "not-serving". Closing the connection immediately upon accepting
// is one of the other ways to implement the "not-serving" mode as
// outlined in gRFC A36.
- l.mu.RUnlock()
+ l.mu.Unlock()
conn.Close()
continue
}
@@ -324,7 +322,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
SourcePort: srcAddr.Port,
})
if err != nil {
- l.mu.RUnlock()
+ l.mu.Unlock()
// When a matching filter chain is not found, we close the
// connection right away, but do not return an error back to
// `grpc.Serve()` from where this Accept() was invoked. Returning an
@@ -341,12 +339,18 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
continue
}
cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.UsableRouteConfiguration}
- l.conns = append(l.conns, cw)
- l.mu.RUnlock()
+ l.conns[cw] = true
+ l.mu.Unlock()
return cw, nil
}
}
+func (l *listenerWrapper) removeConn(conn *connWrapper) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ delete(l.conns, conn)
+}
+
// Close closes the underlying listener. It also cancels the xDS watch
// registered in Serve() and closes any certificate provider instances created
// based on security configuration received in the LDS response.
@@ -376,9 +380,9 @@ func (l *listenerWrapper) switchModeLocked(newMode connectivity.ServingMode, err
l.mode = newMode
if l.mode == connectivity.ServingModeNotServing {
connsToClose := l.conns
- l.conns = nil
+ l.conns = make(map[*connWrapper]bool)
go func() {
- for _, conn := range connsToClose {
+ for conn := range connsToClose {
conn.Drain()
}
}()
@@ -410,7 +414,8 @@ type ldsWatcher struct {
name string
}
-func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) {
+func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) {
+ defer onDone()
if lw.parent.closed.HasFired() {
lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update)
return
@@ -421,7 +426,8 @@ func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) {
lw.parent.handleLDSUpdate(update.Resource)
}
-func (lw *ldsWatcher) OnError(err error) {
+func (lw *ldsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+ defer onDone()
if lw.parent.closed.HasFired() {
lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err)
return
@@ -433,7 +439,8 @@ func (lw *ldsWatcher) OnError(err error) {
// continue to use the old configuration.
}
-func (lw *ldsWatcher) OnResourceDoesNotExist() {
+func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+ defer onDone()
if lw.parent.closed.HasFired() {
lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name)
return
diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go
index 67cde4602894a..bcd3938e6f1ab 100644
--- a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go
+++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go
@@ -147,7 +147,8 @@ type rdsWatcher struct {
canceled bool // eats callbacks if true
}
-func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) {
+func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) {
+ defer onDone()
rw.mu.Lock()
if rw.canceled {
rw.mu.Unlock()
@@ -160,7 +161,8 @@ func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) {
rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource})
}
-func (rw *rdsWatcher) OnError(err error) {
+func (rw *rdsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+ defer onDone()
rw.mu.Lock()
if rw.canceled {
rw.mu.Unlock()
@@ -173,7 +175,8 @@ func (rw *rdsWatcher) OnError(err error) {
rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err})
}
-func (rw *rdsWatcher) OnResourceDoesNotExist() {
+func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+ defer onDone()
rw.mu.Lock()
if rw.canceled {
rw.mu.Unlock()
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
index b0763a0240318..3251737f181ea 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
@@ -23,6 +23,7 @@ import (
"fmt"
"strings"
"sync"
+ "sync/atomic"
"time"
"google.golang.org/grpc/internal/grpclog"
@@ -118,12 +119,12 @@ func newAuthority(args authorityArgs) (*authority, error) {
}
tr, err := transport.New(transport.Options{
- ServerCfg: *args.serverCfg,
+ ServerCfg: args.serverCfg,
OnRecvHandler: ret.handleResourceUpdate,
OnErrorHandler: ret.newConnectionError,
OnSendHandler: ret.transportOnSendHandler,
Logger: args.logger,
- NodeProto: args.bootstrapCfg.NodeProto,
+ NodeProto: args.bootstrapCfg.Node(),
})
if err != nil {
return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err)
@@ -148,7 +149,7 @@ func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) {
a.startWatchTimersLocked(rType, u.ResourceNames)
}
-func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error {
+func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error {
rType := a.resourceTypeGetter(resourceUpdate.URL)
if rType == nil {
return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL)
@@ -159,14 +160,40 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate
ServerConfig: a.serverCfg,
}
updates, md, err := decodeAllResources(opts, rType, resourceUpdate)
- a.updateResourceStateAndScheduleCallbacks(rType, updates, md)
+ a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone)
return err
}
-func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) {
+func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) {
a.resourcesMu.Lock()
defer a.resourcesMu.Unlock()
+ // We build a list of callback funcs to invoke, and invoke them at the end
+ // of this method instead of inline (when handling the update for a
+ // particular resource), because we want to make sure that all calls to
+ // increment watcherCnt happen before any callbacks are invoked. This will
+ // ensure that the onDone callback is never invoked before all watcher
+ // callbacks are invoked, and the watchers have processed the update.
+ watcherCnt := new(atomic.Int64)
+ done := func() {
+ watcherCnt.Add(-1)
+ if watcherCnt.Load() == 0 {
+ onDone()
+ }
+ }
+ funcsToSchedule := []func(context.Context){}
+ defer func() {
+ if len(funcsToSchedule) == 0 {
+ // When there are no watchers for the resources received as part of
+ // this update, invoke onDone explicitly to unblock the next read on
+ // the ADS stream.
+ onDone()
+ }
+ for _, f := range funcsToSchedule {
+ a.serializer.ScheduleOr(f, onDone)
+ }
+ }()
+
resourceStates := a.resources[rType]
for name, uErr := range updates {
if state, ok := resourceStates[name]; ok {
@@ -210,7 +237,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
for watcher := range state.watchers {
watcher := watcher
err := uErr.err
- a.serializer.Schedule(func(context.Context) { watcher.OnError(err) })
+ watcherCnt.Add(1)
+ funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) })
}
continue
}
@@ -225,11 +253,14 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
for watcher := range state.watchers {
watcher := watcher
resource := uErr.resource
- a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) })
+ watcherCnt.Add(1)
+ funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) })
}
}
// Sync cache.
- a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name)
+ if a.logger.V(2) {
+ a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name)
+ }
state.cache = uErr.resource
// Set status to ACK, and clear error state. The metadata might be a
// NACK metadata because some other resources in the same response
@@ -283,7 +314,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
// resource deletion is to be ignored, the resource is not removed from
// the cache and the corresponding OnResourceDoesNotExist() callback is
// not invoked on the watchers.
- if a.serverCfg.IgnoreResourceDeletion {
+ if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() {
if !state.deletionIgnored {
state.deletionIgnored = true
a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName())
@@ -298,7 +329,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty
state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
for watcher := range state.watchers {
watcher := watcher
- a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() })
+ watcherCnt.Add(1)
+ funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) })
}
}
}
@@ -426,8 +458,8 @@ func (a *authority) newConnectionError(err error) {
// Propagate the connection error from the transport layer to all watchers.
for watcher := range state.watchers {
watcher := watcher
- a.serializer.Schedule(func(context.Context) {
- watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err))
+ a.serializer.TrySchedule(func(context.Context) {
+ watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {})
})
}
}
@@ -454,7 +486,9 @@ func (a *authority) close() {
}
func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() {
- a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ if a.logger.V(2) {
+ a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ }
a.resourcesMu.Lock()
defer a.resourcesMu.Unlock()
@@ -471,7 +505,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w
// instruct the transport layer to send a DiscoveryRequest for the same.
state := resources[resourceName]
if state == nil {
- a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ if a.logger.V(2) {
+ a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ }
state = &resourceState{
watchers: make(map[xdsresource.ResourceWatcher]bool),
md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested},
@@ -489,7 +525,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w
a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON())
}
resource := state.cache
- a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) })
+ a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) })
}
return func() {
@@ -510,7 +546,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w
// There are no more watchers for this resource, delete the state
// associated with it, and instruct the transport to send a request
// which does not include this resource name.
- a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ if a.logger.V(2) {
+ a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ }
delete(resources, resourceName)
a.sendDiscoveryRequestLocked(rType, resources)
}
@@ -540,7 +578,7 @@ func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourc
state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
for watcher := range state.watchers {
watcher := watcher
- a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() })
+ a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
}
}
@@ -566,13 +604,13 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re
state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
for watcher := range state.watchers {
watcher := watcher
- a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() })
+ a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
}
}
// sendDiscoveryRequestLocked sends a discovery request for the specified
// resource type and resource names. Even though this method does not directly
-// access the resource cache, it is important that `resourcesMu` be beld when
+// access the resource cache, it is important that `resourcesMu` be held when
// calling this method to ensure that a consistent snapshot of resource names is
// being requested.
func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) {
@@ -589,7 +627,7 @@ func (a *authority) reportLoad() (*load.Store, func()) {
return a.transport.ReportLoad()
}
-func (a *authority) dumpResources() ([]*v3statuspb.ClientConfig_GenericXdsConfig, error) {
+func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig {
a.resourcesMu.Lock()
defer a.resourcesMu.Unlock()
@@ -619,7 +657,7 @@ func (a *authority) dumpResources() ([]*v3statuspb.ClientConfig_GenericXdsConfig
ret = append(ret, config)
}
}
- return ret, nil
+ return ret
}
func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus {
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
index 468c5fb31b9b1..144cb5bd7686f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
@@ -24,8 +24,6 @@ import (
"google.golang.org/grpc/internal/xds/bootstrap"
"google.golang.org/grpc/xds/internal/xdsclient/load"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
-
- v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
)
// XDSClient is a full fledged gRPC client which queries a set of discovery APIs
@@ -48,10 +46,6 @@ type XDSClient interface {
// the watcher is canceled. Callers need to handle this case.
WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func())
- // DumpResources returns the status of the xDS resources. Returns a map of
- // resource type URLs to a map of resource names to resource state.
- DumpResources() (*v3statuspb.ClientStatusResponse, error)
-
ReportLoad(*bootstrap.ServerConfig) (*load.Store, func())
BootstrapConfig() *bootstrap.Config
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
index 8dec8f34b2096..6097e86925e6f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
@@ -19,9 +19,7 @@
package xdsclient
import (
- "bytes"
"context"
- "encoding/json"
"fmt"
"sync"
"time"
@@ -33,44 +31,33 @@ import (
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
-// New returns a new xDS client configured by the bootstrap file specified in env
-// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG.
-//
-// The returned client is a reference counted singleton instance. This function
-// creates a new client only when one doesn't already exist.
-//
-// The second return value represents a close function which releases the
-// caller's reference on the returned client. The caller is expected to invoke
-// it once they are done using the client. The underlying client will be closed
-// only when all references are released, and it is safe for the caller to
-// invoke this close function multiple times.
-func New() (XDSClient, func(), error) {
- return newRefCountedWithConfig(nil)
-}
+// NameForServer represents the value to be passed as name when creating an xDS
+// client from xDS-enabled gRPC servers. This is a well-known dedicated key
+// value, and is defined in gRFC A71.
+const NameForServer = "#server"
-// NewWithConfig is similar to New, except that it uses the provided bootstrap
-// configuration to create the xDS client if and only if the bootstrap
-// environment variables are not defined.
+// New returns an xDS client configured with bootstrap configuration specified
+// by the ordered list:
+// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP
+// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG
+// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig
//
-// The returned client is a reference counted singleton instance. This function
-// creates a new client only when one doesn't already exist.
+// gRPC client implementations are expected to pass the channel's target URI for
+// the name field, while server implementations are expected to pass a dedicated
+// well-known value "#server", as specified in gRFC A71. The returned client is
+// a reference counted implementation shared among callers using the same name.
//
// The second return value represents a close function which releases the
// caller's reference on the returned client. The caller is expected to invoke
// it once they are done using the client. The underlying client will be closed
// only when all references are released, and it is safe for the caller to
// invoke this close function multiple times.
-//
-// # Internal Only
-//
-// This function should ONLY be used by the internal google-c2p resolver.
-// DO NOT use this elsewhere. Use New() instead.
-func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) {
- return newRefCountedWithConfig(config)
+func New(name string) (XDSClient, func(), error) {
+ return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
}
-// newWithConfig returns a new xdsClient with the given config.
-func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) {
+// newClientImpl returns a new xdsClient with the given config.
+func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) {
ctx, cancel := context.WithCancel(context.Background())
c := &clientImpl{
done: grpcsync.NewEvent(),
@@ -84,13 +71,14 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i
}
c.logger = prefixLogger(c)
- c.logger.Infof("Created client to xDS management server: %s", config.XDSServer)
return c, nil
}
// OptionsForTesting contains options to configure xDS client creation for
// testing purposes only.
type OptionsForTesting struct {
+ // Name is a unique name for this xDS client.
+ Name string
// Contents contain a JSON representation of the bootstrap configuration to
// be used when creating the xDS client.
Contents []byte
@@ -114,6 +102,9 @@ type OptionsForTesting struct {
//
// This function should ONLY be used for testing purposes.
func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) {
+ if opts.Name == "" {
+ return nil, nil, fmt.Errorf("opts.Name field must be non-empty")
+ }
if opts.WatchExpiryTimeout == 0 {
opts.WatchExpiryTimeout = defaultWatchExpiryTimeout
}
@@ -121,49 +112,32 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) {
opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout
}
- // Normalize the input configuration, as this is used as the key in the map
- // of xDS clients created for testing.
- buf := bytes.Buffer{}
- err := json.Indent(&buf, opts.Contents, "", "")
- if err != nil {
- return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err)
+ if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil {
+ return nil, nil, err
}
- opts.Contents = bytes.TrimSpace(buf.Bytes())
+ client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout)
+ return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err
+}
+// GetForTesting returns an xDS client created earlier using the given name.
+//
+// The second return value represents a close function which the caller is
+// expected to invoke once they are done using the client. It is safe for the
+// caller to invoke this close function multiple times.
+//
+// # Testing Only
+//
+// This function should ONLY be used for testing purposes.
+func GetForTesting(name string) (XDSClient, func(), error) {
clientsMu.Lock()
defer clientsMu.Unlock()
- var client *clientRefCounted
- closeFunc := grpcsync.OnceFunc(func() {
- clientsMu.Lock()
- defer clientsMu.Unlock()
- if client.decrRef() == 0 {
- client.close()
- delete(clients, string(opts.Contents))
- }
- })
-
- // If an xDS client exists for the given configuration, increment its
- // reference count and return it.
- if c := clients[string(opts.Contents)]; c != nil {
- c.incrRef()
- client = c
- return c, closeFunc, nil
- }
-
- // Create a new xDS client for the given configuration
- bcfg, err := bootstrap.NewConfigFromContents(opts.Contents)
- if err != nil {
- return nil, nil, fmt.Errorf("bootstrap config %s: %v", string(opts.Contents), err)
- }
- cImpl, err := newWithConfig(bcfg, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout)
- if err != nil {
- return nil, nil, fmt.Errorf("creating xDS client: %v", err)
+ c, ok := clients[name]
+ if !ok {
+ return nil, nil, fmt.Errorf("xDS client with name %q not found", name)
}
- client = &clientRefCounted{clientImpl: cImpl, refCount: 1}
- clients[string(opts.Contents)] = client
-
- return client, closeFunc, nil
+ c.incrRef()
+ return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil
}
func init() {
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go
new file mode 100644
index 0000000000000..1efb4de42eb2e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/xds/bootstrap"
+)
+
+const (
+ defaultWatchExpiryTimeout = 15 * time.Second
+ defaultIdleAuthorityDeleteTimeout = 5 * time.Minute
+)
+
+var (
+ // The following functions are no-ops in the actual code, but can be
+ // overridden in tests to give them visibility into certain events.
+ xdsClientImplCreateHook = func(string) {}
+ xdsClientImplCloseHook = func(string) {}
+)
+
+func clientRefCountedClose(name string) {
+ clientsMu.Lock()
+ defer clientsMu.Unlock()
+
+ client, ok := clients[name]
+ if !ok {
+ logger.Errorf("Attempt to close a non-existent xDS client with name %s", name)
+ return
+ }
+ if client.decrRef() != 0 {
+ return
+ }
+ client.clientImpl.close()
+ xdsClientImplCloseHook(name)
+ delete(clients, name)
+
+}
+
+// newRefCounted creates a new reference counted xDS client implementation for
+// name, if one does not exist already. If an xDS client for the given name
+// exists, it gets a reference to it and returns it.
+func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) {
+ clientsMu.Lock()
+ defer clientsMu.Unlock()
+
+ if c := clients[name]; c != nil {
+ c.incrRef()
+ return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil
+ }
+
+ // Create the new client implementation.
+ config, err := bootstrap.GetConfiguration()
+ if err != nil {
+ return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err)
+ }
+ c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout)
+ if err != nil {
+ return nil, nil, err
+ }
+ c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config)
+ client := &clientRefCounted{clientImpl: c, refCount: 1}
+ clients[name] = client
+ xdsClientImplCreateHook(name)
+
+ logger.Infof("xDS node ID: %s", config.Node().GetId())
+ return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil
+}
+
+// clientRefCounted is ref-counted, and to be shared by the xds resolver and
+// balancer implementations, across multiple ClientConns and Servers.
+type clientRefCounted struct {
+ *clientImpl
+
+ refCount int32 // accessed atomically
+}
+
+func (c *clientRefCounted) incrRef() int32 {
+ return atomic.AddInt32(&c.refCount, 1)
+}
+
+func (c *clientRefCounted) decrRef() int32 {
+ return atomic.AddInt32(&c.refCount, -1)
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go
index 7321250d6ab26..9f619016a08e1 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go
@@ -85,17 +85,17 @@ func (c *clientImpl) close() {
c.authorityMu.Unlock()
c.serializerClose()
- for _, f := range c.config.XDSServer.Cleanups {
- f()
- }
- for _, a := range c.config.Authorities {
- if a.XDSServer == nil {
- // The server for this authority is the top-level one, cleaned up above.
- continue
- }
- for _, f := range a.XDSServer.Cleanups {
+ for _, s := range c.config.XDSServers() {
+ for _, f := range s.Cleanups() {
f()
}
}
+ for _, a := range c.config.Authorities() {
+ for _, s := range a.XDSServers {
+ for _, f := range s.Cleanups() {
+ f()
+ }
+ }
+ }
c.logger.Infof("Shutdown")
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go
index 69db79ee89137..1ce20fabdf830 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go
@@ -45,14 +45,18 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), err
return nil, nil, errors.New("the xds-client is closed")
}
- config := c.config.XDSServer
+ config := c.config.XDSServers()[0]
if scheme == xdsresource.FederationScheme {
- cfg, ok := c.config.Authorities[authority]
+ authorities := c.config.Authorities()
+ if authorities == nil {
+ return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
+ }
+ cfg, ok := authorities[authority]
if !ok {
return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
}
- if cfg.XDSServer != nil {
- config = cfg.XDSServer
+ if len(cfg.XDSServers) >= 1 {
+ config = cfg.XDSServers[0]
}
}
@@ -110,7 +114,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth
serializer: c.serializer,
resourceTypeGetter: c.resourceTypes.get,
watchExpiryTimeout: c.watchExpiryTimeout,
- logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)),
+ logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())),
})
if err != nil {
return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
index 8fbc010f743d4..f4d7b0a0115c2 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
@@ -22,27 +22,32 @@ import (
v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
)
-// DumpResources returns the status and contents of all xDS resources.
-func (c *clientImpl) DumpResources() (*v3statuspb.ClientStatusResponse, error) {
+// dumpResources returns the status and contents of all xDS resources.
+func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig {
c.authorityMu.Lock()
defer c.authorityMu.Unlock()
var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig
for _, a := range c.authorities {
- cfg, err := a.dumpResources()
- if err != nil {
- return nil, err
- }
- retCfg = append(retCfg, cfg...)
+ retCfg = append(retCfg, a.dumpResources()...)
+ }
+
+ return &v3statuspb.ClientConfig{
+ Node: c.config.Node(),
+ GenericXdsConfigs: retCfg,
}
+}
- return &v3statuspb.ClientStatusResponse{
- Config: []*v3statuspb.ClientConfig{
- {
- // TODO: Populate ClientScope. Need to update go-control-plane dependency.
- Node: c.config.NodeProto,
- GenericXdsConfigs: retCfg,
- },
- },
- }, nil
+// DumpResources returns the status and contents of all xDS resources.
+func DumpResources() *v3statuspb.ClientStatusResponse {
+ clientsMu.Lock()
+ defer clientsMu.Unlock()
+
+ resp := &v3statuspb.ClientStatusResponse{}
+ for key, client := range clients {
+ cfg := client.dumpResources()
+ cfg.ClientScope = key
+ resp.Config = append(resp.Config, cfg)
+ }
+ return resp
}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
index ff2f5e9d67285..b42e43a569763 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
@@ -32,7 +32,7 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu
a, err := c.newAuthorityLocked(server)
if err != nil {
c.authorityMu.Unlock()
- c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err)
+ c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err)
return nil, func() {}
}
// Hold the ref before starting load reporting.
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
index 22b8eb0107c93..b9af85db63a85 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
@@ -44,7 +44,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string,
if err := c.resourceTypes.maybeRegister(rType); err != nil {
logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName)
- c.serializer.Schedule(func(context.Context) { watcher.OnError(err) })
+ c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) })
return func() {}
}
@@ -54,7 +54,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string,
a, unref, err := c.findAuthority(n)
if err != nil {
logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority)
- c.serializer.Schedule(func(context.Context) { watcher.OnError(err) })
+ c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) })
return func() {}
}
cancelF := a.watchResource(rType, n.String(), watcher)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
new file mode 100644
index 0000000000000..e126107441093
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal contains functionality internal to the xdsclient package.
+package internal
+
+// The following vars can be overridden by tests.
+var (
+ // NewADSStream is a function that returns a new ADS stream.
+ NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error)
+)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
index 1f266ae20185b..f1e265ee7ddf5 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go
@@ -174,6 +174,7 @@ func (ls *perClusterStore) CallStarted(locality string) {
p, _ = ls.localityRPCCount.LoadOrStore(locality, tp)
}
p.(*rpcCountData).incrInProgress()
+ p.(*rpcCountData).incrIssued()
}
// CallFinished adds one call finished record for the given locality.
@@ -248,6 +249,8 @@ type RequestData struct {
Errored uint64
// InProgress is the number of requests in flight.
InProgress uint64
+ // Issued is the total number requests that were sent.
+ Issued uint64
}
// ServerLoadData contains server load data.
@@ -296,7 +299,8 @@ func (ls *perClusterStore) stats() *Data {
succeeded := countData.loadAndClearSucceeded()
inProgress := countData.loadInProgress()
errored := countData.loadAndClearErrored()
- if succeeded == 0 && inProgress == 0 && errored == 0 {
+ issued := countData.loadAndClearIssued()
+ if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 {
return true
}
@@ -305,6 +309,7 @@ func (ls *perClusterStore) stats() *Data {
Succeeded: succeeded,
Errored: errored,
InProgress: inProgress,
+ Issued: issued,
},
LoadStats: make(map[string]ServerLoadData),
}
@@ -339,6 +344,7 @@ type rpcCountData struct {
succeeded *uint64
errored *uint64
inProgress *uint64
+ issued *uint64
// Map from load desc to load data (sum+count). Loading data from map is
// atomic, but updating data takes a lock, which could cause contention when
@@ -353,6 +359,7 @@ func newRPCCountData() *rpcCountData {
succeeded: new(uint64),
errored: new(uint64),
inProgress: new(uint64),
+ issued: new(uint64),
}
}
@@ -384,6 +391,14 @@ func (rcd *rpcCountData) loadInProgress() uint64 {
return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading.
}
+func (rcd *rpcCountData) incrIssued() {
+ atomic.AddUint64(rcd.issued, 1)
+}
+
+func (rcd *rpcCountData) loadAndClearIssued() uint64 {
+ return atomic.SwapUint64(rcd.issued, 0)
+}
+
func (rcd *rpcCountData) addServerLoad(name string, d float64) {
loads, ok := rcd.serverLoads.Load(name)
if !ok {
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go
deleted file mode 100644
index f981bfebb582c..0000000000000
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package xdsclient
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/xds/bootstrap"
-)
-
-const (
- defaultWatchExpiryTimeout = 15 * time.Second
- defaultIdleAuthorityDeleteTimeout = 5 * time.Minute
-)
-
-var (
- // This is the client returned by New(). It contains one client implementation,
- // and maintains the refcount.
- singletonMu sync.Mutex
- singletonClient *clientRefCounted
-
- // The following functions are no-ops in the actual code, but can be
- // overridden in tests to give them visibility into certain events.
- singletonClientImplCreateHook = func() {}
- singletonClientImplCloseHook = func() {}
-)
-
-// To override in tests.
-var bootstrapNewConfig = bootstrap.NewConfig
-
-func clientRefCountedClose() {
- singletonMu.Lock()
- defer singletonMu.Unlock()
-
- if singletonClient.decrRef() != 0 {
- return
- }
- singletonClient.clientImpl.close()
- singletonClientImplCloseHook()
- singletonClient = nil
-}
-
-func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) {
- singletonMu.Lock()
- defer singletonMu.Unlock()
-
- if singletonClient != nil {
- singletonClient.incrRef()
- return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil
-
- }
-
- // Use fallbackConfig only if bootstrap env vars are unspecified.
- var config *bootstrap.Config
- if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" {
- if fallbackConfig == nil {
- return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil")
- }
- config = fallbackConfig
- } else {
- var err error
- config, err = bootstrapNewConfig()
- if err != nil {
- return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err)
- }
- }
-
- // Create the new client implementation.
- c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout)
- if err != nil {
- return nil, nil, err
- }
- singletonClient = &clientRefCounted{clientImpl: c, refCount: 1}
- singletonClientImplCreateHook()
-
- logger.Infof("xDS node ID: %s", config.NodeProto.GetId())
- return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil
-}
-
-// clientRefCounted is ref-counted, and to be shared by the xds resolver and
-// balancer implementations, across multiple ClientConns and Servers.
-type clientRefCounted struct {
- *clientImpl
-
- refCount int32 // accessed atomically
-}
-
-func (c *clientRefCounted) incrRef() int32 {
- return atomic.AddInt32(&c.refCount, 1)
-}
-
-func (c *clientRefCounted) decrRef() int32 {
- return atomic.AddInt32(&c.refCount, -1)
-}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
new file mode 100644
index 0000000000000..9acc33cbbf8de
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal contains functionality internal to the transport package.
+package internal
+
+// The following vars can be overridden by tests.
+var (
+ // GRPCNewClient creates a new gRPC Client.
+ GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error)
+)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
index 289fd62cbc75d..e47fdd9846ba6 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
@@ -223,6 +223,7 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e
TotalSuccessfulRequests: localityData.RequestStats.Succeeded,
TotalRequestsInProgress: localityData.RequestStats.InProgress,
TotalErrorRequests: localityData.RequestStats.Errored,
+ TotalIssuedRequests: localityData.RequestStats.Issued,
LoadMetricStats: loadMetricStats,
UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads.
})
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
index 421ba78074c0a..0bc0d386802d3 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"sync"
+ "sync/atomic"
"time"
"google.golang.org/grpc"
@@ -35,7 +36,9 @@ import (
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/xds/bootstrap"
"google.golang.org/grpc/keepalive"
+ xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal"
"google.golang.org/grpc/xds/internal/xdsclient/load"
+ transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
"google.golang.org/protobuf/types/known/anypb"
@@ -45,17 +48,23 @@ import (
statuspb "google.golang.org/genproto/googleapis/rpc/status"
)
+type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient
+
+func init() {
+ transportinternal.GRPCNewClient = grpc.NewClient
+ xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) {
+ return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx)
+ }
+}
+
// Any per-RPC level logs which print complete request or response messages
// should be gated at this verbosity level. Other per-RPC level logs which print
-// terse output should be at `INFO` and verbosity 2, which corresponds to using
-// the `Debugf` method on the logger.
+// terse output should be at `INFO` and verbosity 2.
const perRPCVerbosityLevel = 9
-type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient
-
// Transport provides a resource-type agnostic implementation of the xDS
// transport protocol. At this layer, resource contents are supposed to be
-// opaque blobs which should be be meaningful only to the xDS data model layer
+// opaque blobs which should be meaningful only to the xDS data model layer
// which is implemented by the `xdsresource` package.
//
// Under the hood, it owns the gRPC connection to a single management server and
@@ -77,7 +86,7 @@ type Transport struct {
lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine.
// These channels enable synchronization amongst the different goroutines
- // spawned by the transport, and between asynchorous events resulting from
+ // spawned by the transport, and between asynchronous events resulting from
// receipt of responses from the management server.
adsStreamCh chan adsStream // New ADS streams are pushed here.
adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here.
@@ -112,7 +121,10 @@ type Transport struct {
// cause the transport layer to send an ACK to the management server. A non-nil
// error is returned from this function when the data model layer believes
// otherwise, and this will cause the transport layer to send a NACK.
-type OnRecvHandlerFunc func(update ResourceUpdate) error
+//
+// The implementation is expected to invoke onDone when local processing of the
+// update is complete, i.e. it is consumed by all watchers.
+type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error
// OnSendHandlerFunc is the implementation at the authority, which handles state
// changes for the resource watch and stop watch timers accordingly.
@@ -135,7 +147,7 @@ type ResourceUpdate struct {
type Options struct {
// ServerCfg contains all the configuration required to connect to the xDS
// management server.
- ServerCfg bootstrap.ServerConfig
+ ServerCfg *bootstrap.ServerConfig
// OnRecvHandler is the component which makes ACK/NACK decisions based on
// the received resources.
//
@@ -169,16 +181,9 @@ type Options struct {
NodeProto *v3corepb.Node
}
-// For overriding in unit tests.
-var grpcDial = grpc.Dial
-
// New creates a new Transport.
func New(opts Options) (*Transport, error) {
switch {
- case opts.ServerCfg.ServerURI == "":
- return nil, errors.New("missing server URI when creating a new transport")
- case opts.ServerCfg.CredsDialOption() == nil:
- return nil, errors.New("missing credentials when creating a new transport")
case opts.OnRecvHandler == nil:
return nil, errors.New("missing OnRecv callback handler when creating a new transport")
case opts.OnErrorHandler == nil:
@@ -197,11 +202,13 @@ func New(opts Options) (*Transport, error) {
Timeout: 20 * time.Second,
}),
}
- cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...)
+ grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error))
+ cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...)
if err != nil {
// An error from a non-blocking dial indicates something serious.
- return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err)
+ return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err)
}
+ cc.Connect()
boff := opts.Backoff
if boff == nil {
@@ -209,7 +216,7 @@ func New(opts Options) (*Transport, error) {
}
ret := &Transport{
cc: cc,
- serverURI: opts.ServerCfg.ServerURI,
+ serverURI: opts.ServerCfg.ServerURI(),
onRecvHandler: opts.OnRecvHandler,
onErrorHandler: opts.OnErrorHandler,
onSendHandler: opts.OnSendHandler,
@@ -263,12 +270,6 @@ func (t *Transport) SendRequest(url string, resources []string) {
})
}
-func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) {
- // The transport retries the stream with an exponential backoff whenever the
- // stream breaks without ever having seen a response.
- return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx)
-}
-
// ResourceSendInfo wraps the names and url of resources sent to the management
// server. This is used by the `authority` type to start/stop the watch timer
// associated with every resource in the update.
@@ -298,7 +299,9 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, send
if t.logger.V(perRPCVerbosityLevel) {
t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req))
} else {
- t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce)
+ if t.logger.V(2) {
+ t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce)
+ }
}
t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames})
return nil
@@ -311,8 +314,8 @@ func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (re
}
if t.logger.V(perRPCVerbosityLevel) {
t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp))
- } else {
- t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce())
+ } else if t.logger.V(2) {
+ t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce())
}
return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil
}
@@ -328,7 +331,8 @@ func (t *Transport) adsRunner(ctx context.Context) {
// We reset backoff state when we successfully receive at least one
// message from the server.
runStreamWithBackoff := func() error {
- stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc)
+ newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error))
+ stream, err := newStream(ctx, t.cc)
if err != nil {
t.onErrorHandler(err)
t.logger.Warningf("Creating new ADS stream failed: %v", err)
@@ -341,7 +345,7 @@ func (t *Transport) adsRunner(ctx context.Context) {
default:
}
t.adsStreamCh <- stream
- msgReceived := t.recv(stream)
+ msgReceived := t.recv(ctx, stream)
if msgReceived {
return backoff.ErrResetBackoff
}
@@ -461,9 +465,21 @@ func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err erro
// recv receives xDS responses on the provided ADS stream and branches out to
// message specific handlers. Returns true if at least one message was
// successfully received.
-func (t *Transport) recv(stream adsStream) bool {
+func (t *Transport) recv(ctx context.Context, stream adsStream) bool {
+ // Initialize the flow control quota for the stream. This helps to block the
+ // next read until the previous one is consumed by all watchers.
+ fc := newADSFlowControl()
+
msgReceived := false
for {
+ // Wait for ADS stream level flow control to be available.
+ if !fc.wait(ctx) {
+ if t.logger.V(2) {
+ t.logger.Infof("ADS stream context canceled")
+ }
+ return msgReceived
+ }
+
resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream)
if err != nil {
// Note that we do not consider it an error if the ADS stream was closed
@@ -481,12 +497,13 @@ func (t *Transport) recv(stream adsStream) bool {
}
msgReceived = true
- err = t.onRecvHandler(ResourceUpdate{
+ u := ResourceUpdate{
Resources: resources,
URL: url,
Version: rVersion,
- })
- if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported {
+ }
+ fc.setPending()
+ if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported {
t.logger.Warningf("%v", err)
continue
}
@@ -512,7 +529,9 @@ func (t *Transport) recv(stream adsStream) bool {
stream: stream,
version: rVersion,
})
- t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce)
+ if t.logger.V(2) {
+ t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce)
+ }
}
}
@@ -618,3 +637,68 @@ func (t *Transport) Close() {
func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State {
return t.cc.GetState()
}
+
+// adsFlowControl implements ADS stream level flow control that enables the
+// transport to block the reading of the next message off of the stream until
+// the previous update is consumed by all watchers.
+//
+// The lifetime of the flow control is tied to the lifetime of the stream.
+type adsFlowControl struct {
+ logger *grpclog.PrefixLogger
+
+ // Whether the most recent update is pending consumption by all watchers.
+ pending atomic.Bool
+ // Channel used to notify when all the watchers have consumed the most
+ // recent update. Wait() blocks on reading a value from this channel.
+ readyCh chan struct{}
+}
+
+// newADSFlowControl returns a new adsFlowControl.
+func newADSFlowControl() *adsFlowControl {
+ return &adsFlowControl{readyCh: make(chan struct{}, 1)}
+}
+
+// setPending changes the internal state to indicate that there is an update
+// pending consumption by all watchers.
+func (fc *adsFlowControl) setPending() {
+ fc.pending.Store(true)
+}
+
+// wait blocks until all the watchers have consumed the most recent update and
+// returns true. If the context expires before that, it returns false.
+func (fc *adsFlowControl) wait(ctx context.Context) bool {
+ // If there is no pending update, there is no need to block.
+ if !fc.pending.Load() {
+ // If all watchers finished processing the most recent update before the
+ // `recv` goroutine made the next call to `Wait()`, there would be an
+ // entry in the readyCh channel that needs to be drained to ensure that
+ // the next call to `Wait()` doesn't unblock before it actually should.
+ select {
+ case <-fc.readyCh:
+ default:
+ }
+ return true
+ }
+
+ select {
+ case <-ctx.Done():
+ return false
+ case <-fc.readyCh:
+ return true
+ }
+}
+
+// onDone indicates that all watchers have consumed the most recent update.
+func (fc *adsFlowControl) onDone() {
+ fc.pending.Store(false)
+
+ select {
+ // Writes to the readyCh channel should not block ideally. The default
+ // branch here is to appease the paranoid mind.
+ case fc.readyCh <- struct{}{}:
+ default:
+ if fc.logger.V(2) {
+ fc.logger.Infof("ADS stream flow control readyCh is full")
+ }
+ }
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go
index 5ac7f03122390..18d47cbc101d7 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go
@@ -111,7 +111,7 @@ func (c *ClusterResourceData) Raw() *anypb.Any {
// corresponding to the cluster resource being watched.
type ClusterWatcher interface {
// OnUpdate is invoked to report an update for the resource being watched.
- OnUpdate(*ClusterResourceData)
+ OnUpdate(*ClusterResourceData, OnDoneFunc)
// OnError is invoked under different error conditions including but not
// limited to the following:
@@ -121,28 +121,28 @@ type ClusterWatcher interface {
// - resource validation error
// - ADS stream failure
// - connection failure
- OnError(error)
+ OnError(error, OnDoneFunc)
// OnResourceDoesNotExist is invoked for a specific error condition where
// the requested resource is not found on the xDS management server.
- OnResourceDoesNotExist()
+ OnResourceDoesNotExist(OnDoneFunc)
}
type delegatingClusterWatcher struct {
watcher ClusterWatcher
}
-func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) {
+func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) {
c := data.(*ClusterResourceData)
- d.watcher.OnUpdate(c)
+ d.watcher.OnUpdate(c, onDone)
}
-func (d *delegatingClusterWatcher) OnError(err error) {
- d.watcher.OnError(err)
+func (d *delegatingClusterWatcher) OnError(err error, onDone OnDoneFunc) {
+ d.watcher.OnError(err, onDone)
}
-func (d *delegatingClusterWatcher) OnResourceDoesNotExist() {
- d.watcher.OnResourceDoesNotExist()
+func (d *delegatingClusterWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) {
+ d.watcher.OnResourceDoesNotExist(onDone)
}
// WatchCluster uses xDS to discover the configuration associated with the
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go
index 775a8aa194231..66c0ae0b2022a 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go
@@ -54,7 +54,7 @@ type endpointsResourceType struct {
// Decode deserializes and validates an xDS resource serialized inside the
// provided `Any` proto, as received from the xDS management server.
-func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) {
+func (endpointsResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) {
name, rc, err := unmarshalEndpointsResource(resource)
switch {
case name == "":
@@ -107,7 +107,7 @@ func (e *EndpointsResourceData) Raw() *anypb.Any {
// events corresponding to the endpoints resource being watched.
type EndpointsWatcher interface {
// OnUpdate is invoked to report an update for the resource being watched.
- OnUpdate(*EndpointsResourceData)
+ OnUpdate(*EndpointsResourceData, OnDoneFunc)
// OnError is invoked under different error conditions including but not
// limited to the following:
@@ -117,28 +117,28 @@ type EndpointsWatcher interface {
// - resource validation error
// - ADS stream failure
// - connection failure
- OnError(error)
+ OnError(error, OnDoneFunc)
// OnResourceDoesNotExist is invoked for a specific error condition where
// the requested resource is not found on the xDS management server.
- OnResourceDoesNotExist()
+ OnResourceDoesNotExist(OnDoneFunc)
}
type delegatingEndpointsWatcher struct {
watcher EndpointsWatcher
}
-func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) {
+func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) {
e := data.(*EndpointsResourceData)
- d.watcher.OnUpdate(e)
+ d.watcher.OnUpdate(e, onDone)
}
-func (d *delegatingEndpointsWatcher) OnError(err error) {
- d.watcher.OnError(err)
+func (d *delegatingEndpointsWatcher) OnError(err error, onDone OnDoneFunc) {
+ d.watcher.OnError(err, onDone)
}
-func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() {
- d.watcher.OnResourceDoesNotExist()
+func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) {
+ d.watcher.OnResourceDoesNotExist(onDone)
}
// WatchEndpoints uses xDS to discover the configuration associated with the
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go
index bef1277d220c7..196bb9f873f24 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go
@@ -536,12 +536,12 @@ func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain
if name := ts.GetName(); name != transportSocketName {
return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
}
- any := ts.GetTypedConfig()
- if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL {
- return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl)
+ tc := ts.GetTypedConfig()
+ if tc == nil || tc.TypeUrl != version.V3DownstreamTLSContextURL {
+ return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl)
}
downstreamCtx := &v3tlspb.DownstreamTlsContext{}
- if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil {
+ if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil {
return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err)
}
if downstreamCtx.GetRequireSni().GetValue() {
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
index 4337e4e063f75..80fa5e6a21ec9 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
@@ -60,12 +60,12 @@ func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error {
return nil
}
if sc.IdentityInstanceName != "" {
- if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok {
+ if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok {
return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName)
}
}
if sc.RootInstanceName != "" {
- if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok {
+ if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok {
return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName)
}
}
@@ -144,7 +144,7 @@ func (l *ListenerResourceData) Raw() *anypb.Any {
// events corresponding to the listener resource being watched.
type ListenerWatcher interface {
// OnUpdate is invoked to report an update for the resource being watched.
- OnUpdate(*ListenerResourceData)
+ OnUpdate(*ListenerResourceData, OnDoneFunc)
// OnError is invoked under different error conditions including but not
// limited to the following:
@@ -154,28 +154,28 @@ type ListenerWatcher interface {
// - resource validation error
// - ADS stream failure
// - connection failure
- OnError(error)
+ OnError(error, OnDoneFunc)
// OnResourceDoesNotExist is invoked for a specific error condition where
// the requested resource is not found on the xDS management server.
- OnResourceDoesNotExist()
+ OnResourceDoesNotExist(OnDoneFunc)
}
type delegatingListenerWatcher struct {
watcher ListenerWatcher
}
-func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) {
+func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) {
l := data.(*ListenerResourceData)
- d.watcher.OnUpdate(l)
+ d.watcher.OnUpdate(l, onDone)
}
-func (d *delegatingListenerWatcher) OnError(err error) {
- d.watcher.OnError(err)
+func (d *delegatingListenerWatcher) OnError(err error, onDone OnDoneFunc) {
+ d.watcher.OnError(err, onDone)
}
-func (d *delegatingListenerWatcher) OnResourceDoesNotExist() {
- d.watcher.OnResourceDoesNotExist()
+func (d *delegatingListenerWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) {
+ d.watcher.OnResourceDoesNotExist(onDone)
}
// WatchListener uses xDS to discover the configuration associated with the
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
index 3b3a8e79c2b92..55cfd6fbb15b2 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
@@ -52,13 +52,19 @@ type Producer interface {
WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func())
}
+// OnDoneFunc is a function to be invoked by watcher implementations upon
+// completing the processing of a callback from the xDS client. Failure to
+// invoke this callback prevents the xDS client from reading further messages
+// from the xDS server.
+type OnDoneFunc func()
+
// ResourceWatcher wraps the callbacks to be invoked for different events
// corresponding to the resource being watched.
type ResourceWatcher interface {
// OnUpdate is invoked to report an update for the resource being watched.
// The ResourceData parameter needs to be type asserted to the appropriate
// type for the resource being watched.
- OnUpdate(ResourceData)
+ OnUpdate(ResourceData, OnDoneFunc)
// OnError is invoked under different error conditions including but not
// limited to the following:
@@ -68,11 +74,11 @@ type ResourceWatcher interface {
// - resource validation error
// - ADS stream failure
// - connection failure
- OnError(error)
+ OnError(error, OnDoneFunc)
// OnResourceDoesNotExist is invoked for a specific error condition where
// the requested resource is not found on the xDS management server.
- OnResourceDoesNotExist()
+ OnResourceDoesNotExist(OnDoneFunc)
}
// TODO: Once the implementation is complete, rename this interface as
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go
index 8ce5cb28596e7..ed32abb8333d3 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go
@@ -54,7 +54,7 @@ type routeConfigResourceType struct {
// Decode deserializes and validates an xDS resource serialized inside the
// provided `Any` proto, as received from the xDS management server.
-func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) {
+func (routeConfigResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) {
name, rc, err := unmarshalRouteConfigResource(resource)
switch {
case name == "":
@@ -108,7 +108,7 @@ func (r *RouteConfigResourceData) Raw() *anypb.Any {
// events corresponding to the route configuration resource being watched.
type RouteConfigWatcher interface {
// OnUpdate is invoked to report an update for the resource being watched.
- OnUpdate(*RouteConfigResourceData)
+ OnUpdate(*RouteConfigResourceData, OnDoneFunc)
// OnError is invoked under different error conditions including but not
// limited to the following:
@@ -118,28 +118,28 @@ type RouteConfigWatcher interface {
// - resource validation error
// - ADS stream failure
// - connection failure
- OnError(error)
+ OnError(error, OnDoneFunc)
// OnResourceDoesNotExist is invoked for a specific error condition where
// the requested resource is not found on the xDS management server.
- OnResourceDoesNotExist()
+ OnResourceDoesNotExist(OnDoneFunc)
}
type delegatingRouteConfigWatcher struct {
watcher RouteConfigWatcher
}
-func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) {
+func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) {
rc := data.(*RouteConfigResourceData)
- d.watcher.OnUpdate(rc)
+ d.watcher.OnUpdate(rc, onDone)
}
-func (d *delegatingRouteConfigWatcher) OnError(err error) {
- d.watcher.OnError(err)
+func (d *delegatingRouteConfigWatcher) OnError(err error, onDone OnDoneFunc) {
+ d.watcher.OnError(err, onDone)
}
-func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() {
- d.watcher.OnResourceDoesNotExist()
+func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) {
+ d.watcher.OnResourceDoesNotExist(onDone)
}
// WatchRouteConfig uses xDS to discover the configuration associated with the
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go
index 8ede639abee60..ab024b57c4600 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go
@@ -278,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) {
// the received Cluster resource.
func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) {
if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 {
- return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm)
+ return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm)
}
// The Cluster resource contains a `transport_socket` field, which contains
// a oneof `typed_config` field of type `protobuf.Any`. The any proto
@@ -290,12 +290,12 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e
if name := ts.GetName(); name != transportSocketName {
return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
}
- any := ts.GetTypedConfig()
- if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL {
- return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl)
+ tc := ts.GetTypedConfig()
+ if tc == nil || tc.TypeUrl != version.V3UpstreamTLSContextURL {
+ return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl)
}
upstreamCtx := &v3tlspb.UpstreamTlsContext{}
- if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil {
+ if err := proto.Unmarshal(tc.GetValue(), upstreamCtx); err != nil {
return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err)
}
// The following fields from `UpstreamTlsContext` are ignored:
@@ -477,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC
case len(validationCtx.GetVerifyCertificateHash()) != 0:
return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common)
case validationCtx.GetRequireSignedCertificateTimestamp().GetValue():
- return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common)
+ return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common)
case validationCtx.GetCrl() != nil:
return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common)
case validationCtx.GetCustomValidatorConfig() != nil:
diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go
index 126aff067c4ca..1fea8c8309367 100644
--- a/vendor/google.golang.org/grpc/xds/server.go
+++ b/vendor/google.golang.org/grpc/xds/server.go
@@ -43,8 +43,8 @@ const serverPrefix = "[xds-server %p] "
var (
// These new functions will be overridden in unit tests.
- newXDSClient = func() (xdsclient.XDSClient, func(), error) {
- return xdsclient.New()
+ newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) {
+ return xdsclient.New(name)
}
newGRPCServer = func(opts ...grpc.ServerOption) grpcServer {
return grpc.NewServer(opts...)
@@ -95,11 +95,14 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) {
newXDSClient := newXDSClient
if s.opts.bootstrapContentsForTesting != nil {
// Bootstrap file contents may be specified as a server option for tests.
- newXDSClient = func() (xdsclient.XDSClient, func(), error) {
- return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: s.opts.bootstrapContentsForTesting})
+ newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) {
+ return xdsclient.NewForTesting(xdsclient.OptionsForTesting{
+ Name: name,
+ Contents: s.opts.bootstrapContentsForTesting,
+ })
}
}
- xdsClient, xdsClientClose, err := newXDSClient()
+ xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer)
if err != nil {
return nil, fmt.Errorf("xDS client creation failed: %v", err)
}
@@ -108,7 +111,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) {
// Listener resource name template is mandatory on the server side.
cfg := xdsClient.BootstrapConfig()
- if cfg.ServerListenerResourceNameTemplate == "" {
+ if cfg.ServerListenerResourceNameTemplate() == "" {
xdsClientClose()
return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration")
}
@@ -191,7 +194,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error {
// string, it will be replaced with the server's listening "IP:port" (e.g.,
// "0.0.0.0:8080", "[::]:8080").
cfg := s.xdsC.BootstrapConfig()
- name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String())
+ name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String())
// Create a listenerWrapper which handles all functionality required by
// this particular instance of Serve().
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index bb2966e3b4c69..8f9e592f87012 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
- return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 29846df222c38..0e72d85378b31 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
}
v := m.Get(fd)
- isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
- isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
- if isProto2Scalar || isSingularMessage {
+ if fd.HasPresence() {
if m.skipNull {
continue
}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c84fa1..024ffebd3ddef 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
//
// Each variable is populated with a nil pointer to the options struct.
var (
- File pref.ProtoMessage
- Enum pref.ProtoMessage
- EnumValue pref.ProtoMessage
- Message pref.ProtoMessage
- Field pref.ProtoMessage
- Oneof pref.ProtoMessage
- ExtensionRange pref.ProtoMessage
- Service pref.ProtoMessage
- Method pref.ProtoMessage
+ File protoreflect.ProtoMessage
+ Enum protoreflect.ProtoMessage
+ EnumValue protoreflect.ProtoMessage
+ Message protoreflect.ProtoMessage
+ Field protoreflect.ProtoMessage
+ Oneof protoreflect.ProtoMessage
+ ExtensionRange protoreflect.ProtoMessage
+ Service protoreflect.ProtoMessage
+ Method protoreflect.ProtoMessage
)
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 029a6a12d7423..08dad7692c64b 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,7 +5,7 @@
// Package editionssupport defines constants for editions that are supported.
package editionssupport
-import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+import "google.golang.org/protobuf/types/descriptorpb"
const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40b25ae..fa790e0ff1968 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -258,6 +258,7 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
+ IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool {
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -425,6 +427,7 @@ type (
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
+ IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
@@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool {
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b08c13..d2f549497eb7d 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ xd.L1.IsLazy = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8dbe0d..67a51b327c5c2 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356b660b..fd4d0c83d2575 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+ case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd01211ce4..d9b9d916a20e2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
// and the well-known types.
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b42421..7f67cbb6e97e5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,25 @@ import (
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
- GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+ GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
- GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
)
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
)
+
+// Extension numbers
+const (
+ FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02ff2a4c..bef5a25fbbf02 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b85b02d..9404270de0ba3 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a20ce22..0d5b546e0eed4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
- fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
- f.lazy.value = f.lazy.fn()
+ panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
- f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
- f.typ = t
- f.lazy = &lazyExtensionValue{fn: fn}
-}
-
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e44b92b..7c1f66c8c1956 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
+ if cf.funcs.isInit == nil {
+ out.initialized = true
+ }
vi.Set(vw)
return out, nil
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb739a23..78be9df3420de 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
}
// getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577bd6b24..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
- v := p.v.Elem().Int()
- return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- v := p.v.Elem().Int()
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(v))
- return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- p.v.Elem().SetInt(int64(v))
- out.n = n
- return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
- size: sizeEnum,
- marshal: appendEnum,
- unmarshal: consumeEnum,
- merge: mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- if p.v.Elem().Int() == 0 {
- return 0
- }
- return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- if p.v.Elem().Int() == 0 {
- return b, nil
- }
- return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if src.v.Elem().Int() != 0 {
- dst.v.Elem().Set(src.v.Elem())
- }
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
- size: sizeEnumNoZero,
- marshal: appendEnumNoZero,
- unmarshal: consumeEnum,
- merge: mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- if p.v.Elem().IsNil() {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
- }
- return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if !src.v.Elem().IsNil() {
- v := reflect.New(dst.v.Type().Elem().Elem())
- v.Elem().Set(src.v.Elem().Elem())
- dst.v.Elem().Set(v)
- }
-}
-
-var coderEnumPtr = pointerCoderFuncs{
- size: sizeEnumPtr,
- marshal: appendEnumPtr,
- unmarshal: consumeEnumPtr,
- merge: mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
- }
- return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- s := p.v.Elem()
- if wtyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return out, errDecode
- }
- for len(b) > 0 {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- b = b[n:]
- }
- out.n = n
- return out, nil
- }
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- out.n = n
- return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
- size: sizeEnumSlice,
- marshal: appendEnumSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return 0
- }
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return b, nil
- }
- b = protowire.AppendVarint(b, f.wiretag)
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- b = protowire.AppendVarint(b, uint64(n))
- for i := 0; i < llen; i++ {
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
- size: sizeEnumPackedSlice,
- marshal: appendEnumPackedSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e23c9ed..077712c2c5a3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
// When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55a26c1..f72ddd882f324 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
- // pref.Value.String never panics, so we go through an interface
+ // protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd212247248..6254f5de41f5d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,7 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
- proto "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 0000000000000..9f6c32a7d8cdf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+ return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations, type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+ if mx == nil || my == nil {
+ return mx == my
+ }
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ msx, ok := mx.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ msy, ok := my.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+
+ mi := msx.messageInfo()
+ miy := msy.messageInfo()
+ if mi != miy {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ mi.init()
+ // Compares regular fields
+ // Modified Message.Range code that compares two messages of the same type
+ // while going over the fields.
+ for _, ri := range mi.rangeInfos {
+ var fd protoreflect.FieldDescriptor
+ var vx, vy protoreflect.Value
+
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ hx := ri.has(msx.pointer())
+ hy := ri.has(msy.pointer())
+ if hx != hy {
+ return false
+ }
+ if !hx {
+ continue
+ }
+ fd = ri.fieldDesc
+ vx = ri.get(msx.pointer())
+ vy = ri.get(msy.pointer())
+ case *oneofInfo:
+ fnx := ri.which(msx.pointer())
+ fny := ri.which(msy.pointer())
+ if fnx != fny {
+ return false
+ }
+ if fnx <= 0 {
+ continue
+ }
+ fi := mi.fields[fnx]
+ fd = fi.fieldDesc
+ vx = fi.get(msx.pointer())
+ vy = fi.get(msy.pointer())
+ }
+
+ if !equalValue(fd, vx, vy) {
+ return false
+ }
+ }
+
+ // Compare extensions.
+ // This is more complicated because mx or my could have empty/nil extension maps,
+ // however some populated extension map values are equal to nil extension maps.
+ emx := mi.extensionMap(msx.pointer())
+ emy := mi.extensionMap(msy.pointer())
+ if emx != nil {
+ for k, x := range *emx {
+ xd := x.Type().TypeDescriptor()
+ xv := x.Value()
+ var y ExtensionField
+ ok := false
+ if emy != nil {
+ y, ok = (*emy)[k]
+ }
+ // We need to treat empty lists as equal to nil values
+ if emy == nil || !ok {
+ if xd.IsList() && xv.List().Len() == 0 {
+ continue
+ }
+ return false
+ }
+
+ if !equalValue(xd, xv, y.Value()) {
+ return false
+ }
+ }
+ }
+ if emy != nil {
+ // emy may have extensions emx does not have, need to check them as well
+ for k, y := range *emy {
+ if emx != nil {
+ // emx has the field, so we already checked it
+ if _, ok := (*emx)[k]; ok {
+ continue
+ }
+ }
+ // Empty lists are equal to nil
+ if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+ continue
+ }
+
+ // Cant be equal if the extension is populated
+ return false
+ }
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+ // slow path
+ if fd.Kind() != protoreflect.MessageKind {
+ return vx.Equal(vy)
+ }
+
+ // fast path special cases
+ if fd.IsMap() {
+ if fd.MapValue().Kind() == protoreflect.MessageKind {
+ return equalMessageMap(vx.Map(), vy.Map())
+ }
+ return vx.Equal(vy)
+ }
+
+ if fd.IsList() {
+ return equalMessageList(vx.List(), vy.List())
+ }
+
+ return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+ if mx.Len() != my.Len() {
+ return false
+ }
+ equal := true
+ mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ if !my.Has(k) {
+ equal = false
+ return false
+ }
+ vy := my.Get(k)
+ equal = equalMessage(vx.Message(), vy.Message())
+ return equal
+ })
+ return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalImpl instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+ if lx.Len() != ly.Len() {
+ return false
+ }
+ for i := 0; i < lx.Len(); i++ {
+ // We only operate on messages here since equalImpl will not call us in any other case.
+ if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ if len(mx) != len(my) {
+ return false
+ }
+
+ for k, v1 := range mx {
+ if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee633f9..b6849d66927d2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d454d32..741b5ed29cf84 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
- // Exporter must be provided in a purego environment in order to provide
- // access to unexported fields.
+ // Deprecated: Exporter will be removed the next time we bump
+ // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a29d96..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
- index int
- export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
- if len(f.Index) != 1 {
- panic("embedded structs are not supported")
- }
- if f.PkgPath == "" {
- return offset{index: f.Index[0]} // field is already exported
- }
- if x == nil {
- panic("exporter must be provided for unexported field")
- }
- return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
- return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
- return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
- return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
- if f.export != nil {
- if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
- return pointer{v: v}
- }
- }
- return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
- if got := p.v.Type().Elem(); got != t {
- panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
- }
- return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
- return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
- return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
- return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
- // TODO: reconsider this
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
- sp := p.v.Elem()
- sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
- p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
-func (ms *messageState) pointer() pointer { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
- once sync.Once
- m messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
- m.once.Do(func() {
- m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
- m.m.mi = mi
- })
- return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d8ab5e..79e186667b70f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
import (
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f333860e8..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
- return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
- return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd09082a..832a7988f145f 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2ba3cff..1ffddf6877a9a 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f6862c46..fb8e15e8dad50 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 34
- Patch = 2
+ Minor = 35
+ Patch = 1
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b03c73d..c36d4a9cd75b7 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
+
+ // Only one of the messages needs to implement the fast-path for it to work.
+ pmx := protoMethods(mx)
+ pmy := protoMethods(my)
+ if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+ return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+ }
+
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index d248f29284684..78445d116f7bd 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+// ... // make use of mm
+// }
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 85617554272cb..ebcb4a8ab138e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
f.L1.IsWeak = opts.GetWeak()
+ f.L1.IsLazy = opts.GetLazy()
if opts.Packed != nil {
f.L1.EditionFeatures.IsPacked = opts.GetPacked()
}
@@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
if xd.JsonName != nil {
x.L2.StringName.InitJSON(xd.GetJsonName())
}
+ if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+ x.L1.Kind = protoreflect.GroupKind
+ }
}
return xs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 804830eda36f3..002e0047aeabb 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
- gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+ "google.golang.org/protobuf/types/gofeaturespb"
)
var defaults = &descriptorpb.FeatureSetDefaults{}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6ebedb8..742cb518c40b1 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
Unmarshal func(unmarshalInput) (unmarshalOutput, error)
Merge func(mergeInput) mergeOutput
CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ Equal func(equalInput) equalOutput
}
supportFlags = uint64
sizeInput = struct {
@@ -75,4 +76,13 @@ type (
checkInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+ equalInput = struct {
+ pragma.NoUnkeyedLiterals
+ MessageA Message
+ MessageB Message
+ }
+ equalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Equal bool
+ }
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2af030c..0000000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
- nilType valueType = iota
- boolType
- int32Type
- int64Type
- uint32Type
- uint64Type
- float32Type
- float64Type
- stringType
- bytesType
- enumType
- ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
- pragma.DoNotCompare // 0B
-
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface any // 16B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
- return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
- return v.str
-}
-func (v Value) getBytes() []byte {
- return v.bin
-}
-func (v Value) getIface() any {
- return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 7f3583ead81a0..0015fcb35d832 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d386990a0f3..479527b58dd37 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d8845d..246156561ce46 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
// CheckInitialized returns an error if any required fields in the message are not set.
CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+ // Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+ Equal func(EqualInput) EqualOutput
}
// SupportFlags indicate support for optional features.
@@ -166,3 +169,18 @@ type CheckInitializedInput = struct {
type CheckInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ MessageA protoreflect.Message
+ MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Equal bool
+}
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 9403eb075077f..6dea75cd5b15f 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -1217,11 +1217,9 @@ type FileDescriptorSet struct {
func (x *FileDescriptorSet) Reset() {
*x = FileDescriptorSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorSet) String() string {
@@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1291,11 +1289,9 @@ type FileDescriptorProto struct {
func (x *FileDescriptorProto) Reset() {
*x = FileDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorProto) String() string {
@@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1434,11 +1430,9 @@ type DescriptorProto struct {
func (x *DescriptorProto) Reset() {
*x = DescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto) String() string {
@@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {}
func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1561,11 +1555,9 @@ const (
func (x *ExtensionRangeOptions) Reset() {
*x = ExtensionRangeOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions) String() string {
@@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct {
func (x *FieldDescriptorProto) Reset() {
*x = FieldDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldDescriptorProto) String() string {
@@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct {
func (x *OneofDescriptorProto) Reset() {
*x = OneofDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofDescriptorProto) String() string {
@@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct {
func (x *EnumDescriptorProto) Reset() {
*x = EnumDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto) String() string {
@@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct {
func (x *EnumValueDescriptorProto) Reset() {
*x = EnumValueDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueDescriptorProto) String() string {
@@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct {
func (x *ServiceDescriptorProto) Reset() {
*x = ServiceDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceDescriptorProto) String() string {
@@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2082,11 +2064,9 @@ const (
func (x *MethodDescriptorProto) Reset() {
*x = MethodDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodDescriptorProto) String() string {
@@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2267,11 +2247,9 @@ const (
func (x *FileOptions) Reset() {
*x = FileOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileOptions) String() string {
@@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {}
func (x *FileOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2534,11 +2512,9 @@ const (
func (x *MessageOptions) Reset() {
*x = MessageOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MessageOptions) String() string {
@@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {}
func (x *MessageOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2707,11 +2683,9 @@ const (
func (x *FieldOptions) Reset() {
*x = FieldOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions) String() string {
@@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {}
func (x *FieldOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2849,11 +2823,9 @@ type OneofOptions struct {
func (x *OneofOptions) Reset() {
*x = OneofOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofOptions) String() string {
@@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {}
func (x *OneofOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2929,11 +2901,9 @@ const (
func (x *EnumOptions) Reset() {
*x = EnumOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumOptions) String() string {
@@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {}
func (x *EnumOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3026,11 +2996,9 @@ const (
func (x *EnumValueOptions) Reset() {
*x = EnumValueOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueOptions) String() string {
@@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {}
func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3115,11 +3083,9 @@ const (
func (x *ServiceOptions) Reset() {
*x = ServiceOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceOptions) String() string {
@@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {}
func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3192,11 +3158,9 @@ const (
func (x *MethodOptions) Reset() {
*x = MethodOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodOptions) String() string {
@@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {}
func (x *MethodOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3274,11 +3238,9 @@ type UninterpretedOption struct {
func (x *UninterpretedOption) Reset() {
*x = UninterpretedOption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption) String() string {
@@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {}
func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3375,11 +3337,9 @@ type FeatureSet struct {
func (x *FeatureSet) Reset() {
*x = FeatureSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSet) String() string {
@@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {}
func (x *FeatureSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct {
func (x *FeatureSetDefaults) Reset() {
*x = FeatureSetDefaults{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults) String() string {
@@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3578,11 +3536,9 @@ type SourceCodeInfo struct {
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo) String() string {
@@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct {
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo) String() string {
@@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct {
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct {
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ReservedRange) String() string {
@@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct {
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct {
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct {
func (x *FieldOptions_EditionDefault) Reset() {
*x = FieldOptions_EditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_EditionDefault) String() string {
@@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct {
func (x *FieldOptions_FeatureSupport) Reset() {
*x = FieldOptions_FeatureSupport{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_FeatureSupport) String() string {
@@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct {
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption_NamePart) String() string {
@@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo_Location) String() string {
@@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() {
if File_google_protobuf_descriptor_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*FieldDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*OneofDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*MethodDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*FileOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*MessageOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*OneofOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*EnumOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*MethodOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ExtensionRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions_Declaration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_EditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_FeatureSupport); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption_NamePart); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo_Location); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo_Annotation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index a2ca940c50fbd..c7e860fcd6d87 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -29,11 +29,9 @@ type GoFeatures struct {
func (x *GoFeatures) Reset() {
*x = GoFeatures{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GoFeatures) String() string {
@@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {}
func (x *GoFeatures) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() {
if File_google_protobuf_go_features_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*GoFeatures); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 7172b43d383f8..87da199a386e5 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_any_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {}
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_any_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() {
if File_google_protobuf_any_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
index 4f2fe89ef11cc..fdc3aef2c65ff 100644
--- a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
@@ -94,11 +94,9 @@ type Api struct {
func (x *Api) Reset() {
*x = Api{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Api) String() string {
@@ -109,7 +107,7 @@ func (*Api) ProtoMessage() {}
func (x *Api) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -197,11 +195,9 @@ type Method struct {
func (x *Method) Reset() {
*x = Method{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Method) String() string {
@@ -212,7 +208,7 @@ func (*Method) ProtoMessage() {}
func (x *Method) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -368,11 +364,9 @@ type Mixin struct {
func (x *Mixin) Reset() {
*x = Mixin{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mixin) String() string {
@@ -383,7 +377,7 @@ func (*Mixin) ProtoMessage() {}
func (x *Mixin) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -516,44 +510,6 @@ func file_google_protobuf_api_proto_init() {
if File_google_protobuf_api_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Api); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Method); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Mixin); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 1b71bcd910af7..b99d4d2410927 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -245,11 +245,9 @@ func (x *Duration) check() uint {
func (x *Duration) Reset() {
*x = Duration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Duration) String() string {
@@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {}
func (x *Duration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_duration_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() {
if File_google_protobuf_duration_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Duration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index d87b4fb8281d0..1761bc9c69a56 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -55,11 +55,9 @@ type Empty struct {
func (x *Empty) Reset() {
*x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_empty_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Empty) String() string {
@@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() {
if File_google_protobuf_empty_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index ac1e91bb6ddb2..19de8d371fd90 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool {
func (x *FieldMask) Reset() {
*x = FieldMask{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldMask) String() string {
@@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {}
func (x *FieldMask) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() {
if File_google_protobuf_field_mask_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FieldMask); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
index fa185780056de..4d15e9748c927 100644
--- a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
@@ -54,11 +54,9 @@ type SourceContext struct {
func (x *SourceContext) Reset() {
*x = SourceContext{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_source_context_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_source_context_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceContext) String() string {
@@ -69,7 +67,7 @@ func (*SourceContext) ProtoMessage() {}
func (x *SourceContext) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_source_context_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -141,20 +139,6 @@ func file_google_protobuf_source_context_proto_init() {
if File_google_protobuf_source_context_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*SourceContext); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d45361cbc7295..8f206a661172c 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -120,6 +120,7 @@ package structpb
import (
base64 "encoding/base64"
+ json "encoding/json"
protojson "google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error {
func (x *Struct) Reset() {
*x = Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Struct) String() string {
@@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {}
func (x *Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -296,19 +295,20 @@ type Value struct {
// NewValue constructs a Value from a general-purpose Go interface.
//
-// ╔════════════════════════╤════════════════════════════════════════════╗
-// ║ Go type │ Conversion ║
-// ╠════════════════════════╪════════════════════════════════════════════╣
-// ║ nil │ stored as NullValue ║
-// ║ bool │ stored as BoolValue ║
-// ║ int, int32, int64 │ stored as NumberValue ║
-// ║ uint, uint32, uint64 │ stored as NumberValue ║
-// ║ float32, float64 │ stored as NumberValue ║
-// ║ string │ stored as StringValue; must be valid UTF-8 ║
-// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]any │ stored as StructValue ║
-// ║ []any │ stored as ListValue ║
-// ╚════════════════════════╧════════════════════════════════════════════╝
+// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+// ║ Go type │ Conversion ║
+// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+// ║ nil │ stored as NullValue ║
+// ║ bool │ stored as BoolValue ║
+// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║
+// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║
+// ║ float32, float64 │ stored as NumberValue ║
+// ║ json.Number │ stored as NumberValue ║
+// ║ string │ stored as StringValue; must be valid UTF-8 ║
+// ║ []byte │ stored as StringValue; base64-encoded ║
+// ║ map[string]any │ stored as StructValue ║
+// ║ []any │ stored as ListValue ║
+// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
@@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) {
return NewBoolValue(v), nil
case int:
return NewNumberValue(float64(v)), nil
+ case int8:
+ return NewNumberValue(float64(v)), nil
+ case int16:
+ return NewNumberValue(float64(v)), nil
case int32:
return NewNumberValue(float64(v)), nil
case int64:
return NewNumberValue(float64(v)), nil
case uint:
return NewNumberValue(float64(v)), nil
+ case uint8:
+ return NewNumberValue(float64(v)), nil
+ case uint16:
+ return NewNumberValue(float64(v)), nil
case uint32:
return NewNumberValue(float64(v)), nil
case uint64:
@@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) {
return NewNumberValue(float64(v)), nil
case float64:
return NewNumberValue(float64(v)), nil
+ case json.Number:
+ n, err := v.Float64()
+ if err != nil {
+ return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+ }
+ return NewNumberValue(n), nil
case string:
if !utf8.ValidString(v) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
@@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error {
func (x *Value) Reset() {
*x = Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error {
func (x *ListValue) Reset() {
*x = ListValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListValue) String() string {
@@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {}
func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() {
if File_google_protobuf_struct_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ListValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645b0835..0d20722d70b77 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
func (x *Timestamp) Reset() {
*x = Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
func (x *Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() {
if File_google_protobuf_timestamp_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
index 52887fd5db66e..f0ca52a01b354 100644
--- a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
@@ -293,11 +293,9 @@ type Type struct {
func (x *Type) Reset() {
*x = Type{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type) String() string {
@@ -308,7 +306,7 @@ func (*Type) ProtoMessage() {}
func (x *Type) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -404,11 +402,9 @@ type Field struct {
func (x *Field) Reset() {
*x = Field{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Field) String() string {
@@ -419,7 +415,7 @@ func (*Field) ProtoMessage() {}
func (x *Field) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -526,11 +522,9 @@ type Enum struct {
func (x *Enum) Reset() {
*x = Enum{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Enum) String() string {
@@ -541,7 +535,7 @@ func (*Enum) ProtoMessage() {}
func (x *Enum) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -614,11 +608,9 @@ type EnumValue struct {
func (x *EnumValue) Reset() {
*x = EnumValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValue) String() string {
@@ -629,7 +621,7 @@ func (*EnumValue) ProtoMessage() {}
func (x *EnumValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -686,11 +678,9 @@ type Option struct {
func (x *Option) Reset() {
*x = Option{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Option) String() string {
@@ -701,7 +691,7 @@ func (*Option) ProtoMessage() {}
func (x *Option) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -906,68 +896,6 @@ func file_google_protobuf_type_proto_init() {
if File_google_protobuf_type_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Type); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Field); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Enum); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Option); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index e473f826aa31b..006060e5695fc 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue {
func (x *DoubleValue) Reset() {
*x = DoubleValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DoubleValue) String() string {
@@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {}
func (x *DoubleValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -125,11 +123,9 @@ func Float(v float32) *FloatValue {
func (x *FloatValue) Reset() {
*x = FloatValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FloatValue) String() string {
@@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {}
func (x *FloatValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value {
func (x *Int64Value) Reset() {
*x = Int64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int64Value) String() string {
@@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {}
func (x *Int64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value {
func (x *UInt64Value) Reset() {
*x = UInt64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt64Value) String() string {
@@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {}
func (x *UInt64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value {
func (x *Int32Value) Reset() {
*x = Int32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int32Value) String() string {
@@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {}
func (x *Int32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value {
func (x *UInt32Value) Reset() {
*x = UInt32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt32Value) String() string {
@@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {}
func (x *UInt32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue {
func (x *BoolValue) Reset() {
*x = BoolValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BoolValue) String() string {
@@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {}
func (x *BoolValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -461,11 +447,9 @@ func String(v string) *StringValue {
func (x *StringValue) Reset() {
*x = StringValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringValue) String() string {
@@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {}
func (x *StringValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue {
func (x *BytesValue) Reset() {
*x = BytesValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BytesValue) String() string {
@@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {}
func (x *BytesValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() {
if File_google_protobuf_wrappers_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*DoubleValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FloatValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Int64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*UInt64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Int32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*UInt32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*BoolValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*StringValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*BytesValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 49e7bb611899f..5c2359b42266b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,8 +1,8 @@
-# cel.dev/expr v0.16.0
+# cel.dev/expr v0.16.1
## explicit; go 1.18
cel.dev/expr
-# cloud.google.com/go v0.115.1
-## explicit; go 1.20
+# cloud.google.com/go v0.116.0
+## explicit; go 1.21
cloud.google.com/go
cloud.google.com/go/internal
cloud.google.com/go/internal/detect
@@ -10,7 +10,7 @@ cloud.google.com/go/internal/optional
cloud.google.com/go/internal/pubsub
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
-# cloud.google.com/go/auth v0.9.0
+# cloud.google.com/go/auth v0.9.8
## explicit; go 1.21
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
@@ -22,6 +22,7 @@ cloud.google.com/go/auth/credentials/internal/stsexchange
cloud.google.com/go/auth/grpctransport
cloud.google.com/go/auth/httptransport
cloud.google.com/go/auth/internal
+cloud.google.com/go/auth/internal/compute
cloud.google.com/go/auth/internal/credsfile
cloud.google.com/go/auth/internal/jwt
cloud.google.com/go/auth/internal/transport
@@ -29,32 +30,32 @@ cloud.google.com/go/auth/internal/transport/cert
# cloud.google.com/go/auth/oauth2adapt v0.2.4
## explicit; go 1.20
cloud.google.com/go/auth/oauth2adapt
-# cloud.google.com/go/bigtable v1.29.0
-## explicit; go 1.20
+# cloud.google.com/go/bigtable v1.33.0
+## explicit; go 1.21
cloud.google.com/go/bigtable
cloud.google.com/go/bigtable/admin/apiv2/adminpb
cloud.google.com/go/bigtable/apiv2/bigtablepb
cloud.google.com/go/bigtable/bttest
cloud.google.com/go/bigtable/internal
cloud.google.com/go/bigtable/internal/option
-# cloud.google.com/go/compute/metadata v0.5.0
-## explicit; go 1.20
+# cloud.google.com/go/compute/metadata v0.5.2
+## explicit; go 1.21
cloud.google.com/go/compute/metadata
-# cloud.google.com/go/iam v1.2.0
+# cloud.google.com/go/iam v1.2.1
## explicit; go 1.21
cloud.google.com/go/iam
cloud.google.com/go/iam/apiv1/iampb
-# cloud.google.com/go/longrunning v0.6.0
+# cloud.google.com/go/longrunning v0.6.1
## explicit; go 1.21
cloud.google.com/go/longrunning
cloud.google.com/go/longrunning/autogen
cloud.google.com/go/longrunning/autogen/longrunningpb
-# cloud.google.com/go/monitoring v1.21.0
+# cloud.google.com/go/monitoring v1.21.1
## explicit; go 1.21
cloud.google.com/go/monitoring/apiv3/v2
cloud.google.com/go/monitoring/apiv3/v2/monitoringpb
cloud.google.com/go/monitoring/internal
-# cloud.google.com/go/pubsub v1.42.0
+# cloud.google.com/go/pubsub v1.45.0
## explicit; go 1.21
cloud.google.com/go/pubsub
cloud.google.com/go/pubsub/apiv1
@@ -62,8 +63,8 @@ cloud.google.com/go/pubsub/apiv1/pubsubpb
cloud.google.com/go/pubsub/internal
cloud.google.com/go/pubsub/internal/distribution
cloud.google.com/go/pubsub/internal/scheduler
-# cloud.google.com/go/storage v1.43.0
-## explicit; go 1.20
+# cloud.google.com/go/storage v1.44.0
+## explicit; go 1.21
cloud.google.com/go/storage
cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal/apiv2
@@ -206,6 +207,15 @@ github.com/DataDog/sketches-go/ddsketch/store
# github.com/DmitriyVTitov/size v1.5.0
## explicit; go 1.14
github.com/DmitriyVTitov/size
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping
# github.com/IBM/go-sdk-core/v5 v5.17.5
## explicit; go 1.20
github.com/IBM/go-sdk-core/v5/core
@@ -515,7 +525,7 @@ github.com/cespare/xxhash
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20
+# github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78
## explicit; go 1.19
github.com/cncf/xds/go/udpa/annotations
github.com/cncf/xds/go/udpa/type/v1
@@ -666,6 +676,12 @@ github.com/eapache/go-xerial-snappy
# github.com/eapache/queue v1.1.0
## explicit
github.com/eapache/queue
+# github.com/ebitengine/purego v0.8.0
+## explicit; go 1.18
+github.com/ebitengine/purego
+github.com/ebitengine/purego/internal/cgo
+github.com/ebitengine/purego/internal/fakecgo
+github.com/ebitengine/purego/internal/strings
# github.com/edsrzf/mmap-go v1.1.0
## explicit; go 1.17
github.com/edsrzf/mmap-go
@@ -742,10 +758,12 @@ github.com/fluent/fluent-bit-go/output
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
-# github.com/fsouza/fake-gcs-server v1.7.0
-## explicit
+# github.com/fsouza/fake-gcs-server v1.50.2
+## explicit; go 1.22
github.com/fsouza/fake-gcs-server/fakestorage
github.com/fsouza/fake-gcs-server/internal/backend
+github.com/fsouza/fake-gcs-server/internal/checksum
+github.com/fsouza/fake-gcs-server/internal/notification
# github.com/gabriel-vasile/mimetype v1.4.3
## explicit; go 1.20
github.com/gabriel-vasile/mimetype
@@ -889,7 +907,7 @@ github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.4
## explicit
github.com/golang/snappy
-# github.com/google/btree v1.1.2
+# github.com/google/btree v1.1.3
## explicit; go 1.18
github.com/google/btree
# github.com/google/gnostic-models v0.6.8
@@ -945,7 +963,7 @@ github.com/google/s2a-go/stream
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
-# github.com/googleapis/enterprise-certificate-proxy v0.3.2
+# github.com/googleapis/enterprise-certificate-proxy v0.3.4
## explicit; go 1.19
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util
@@ -971,6 +989,9 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1
github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
github.com/gophercloud/gophercloud/openstack/utils
github.com/gophercloud/gophercloud/pagination
+# github.com/gorilla/handlers v1.5.2
+## explicit; go 1.20
+github.com/gorilla/handlers
# github.com/gorilla/mux v1.8.1
## explicit; go 1.20
github.com/gorilla/mux
@@ -1204,7 +1225,7 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
-# github.com/klauspost/compress v1.17.10
+# github.com/klauspost/compress v1.17.11
## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -1267,8 +1288,8 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.77
-## explicit; go 1.21
+# github.com/minio/minio-go/v7 v7.0.78
+## explicit; go 1.22
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/cors
github.com/minio/minio-go/v7/pkg/credentials
@@ -1377,6 +1398,9 @@ github.com/pkg/browser
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
+# github.com/pkg/xattr v0.4.10
+## explicit; go 1.14
+github.com/pkg/xattr
# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
## explicit; go 1.20
github.com/planetscale/vtprotobuf/protohelpers
@@ -1527,7 +1551,7 @@ github.com/segmentio/fasthash/fnv1a
# github.com/sercand/kuberesolver/v5 v5.1.1
## explicit; go 1.18
github.com/sercand/kuberesolver/v5
-# github.com/shirou/gopsutil/v4 v4.24.8
+# github.com/shirou/gopsutil/v4 v4.24.9
## explicit; go 1.18
github.com/shirou/gopsutil/v4/common
github.com/shirou/gopsutil/v4/cpu
@@ -1535,9 +1559,6 @@ github.com/shirou/gopsutil/v4/internal/common
github.com/shirou/gopsutil/v4/mem
github.com/shirou/gopsutil/v4/net
github.com/shirou/gopsutil/v4/process
-# github.com/shoenig/go-m1cpu v0.1.6
-## explicit; go 1.20
-github.com/shoenig/go-m1cpu
# github.com/shopspring/decimal v1.2.0
## explicit; go 1.13
github.com/shopspring/decimal
@@ -1578,7 +1599,7 @@ github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
-# github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1
+# github.com/thanos-io/objstore v0.0.0-20241015070247-5f04b8b0b52a
## explicit; go 1.21
github.com/thanos-io/objstore
github.com/thanos-io/objstore/exthttp
@@ -1602,10 +1623,11 @@ github.com/twmb/franz-go/pkg/kgo
github.com/twmb/franz-go/pkg/kgo/internal/sticky
github.com/twmb/franz-go/pkg/kversion
github.com/twmb/franz-go/pkg/sasl
+github.com/twmb/franz-go/pkg/sasl/plain
# github.com/twmb/franz-go/pkg/kadm v1.13.0
## explicit; go 1.21
github.com/twmb/franz-go/pkg/kadm
-# github.com/twmb/franz-go/pkg/kfake v0.0.0-20240821035758-b77dd13e2bfa
+# github.com/twmb/franz-go/pkg/kfake v0.0.0-20241015013301-cea7aa5d8037
## explicit; go 1.21
github.com/twmb/franz-go/pkg/kfake
# github.com/twmb/franz-go/pkg/kmsg v1.8.0
@@ -1746,16 +1768,20 @@ go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
# go.opentelemetry.io/collector/semconv v0.105.0
## explicit; go 1.21.0
go.opentelemetry.io/collector/semconv/v1.6.1
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
+# go.opentelemetry.io/contrib/detectors/gcp v1.29.0
+## explicit; go 1.21
+go.opentelemetry.io/contrib/detectors/gcp
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
## explicit; go 1.21
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
## explicit; go 1.21
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.28.0
+# go.opentelemetry.io/otel v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -1772,18 +1798,18 @@ go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.21.0
go.opentelemetry.io/otel/semconv/v1.24.0
go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/metric v1.28.0
+# go.opentelemetry.io/otel/metric v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.28.0
+# go.opentelemetry.io/otel/sdk v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
-# go.opentelemetry.io/otel/sdk/metric v1.28.0
+# go.opentelemetry.io/otel/sdk/metric v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/sdk/metric
go.opentelemetry.io/otel/sdk/metric/internal
@@ -1791,7 +1817,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate
go.opentelemetry.io/otel/sdk/metric/internal/exemplar
go.opentelemetry.io/otel/sdk/metric/internal/x
go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.28.0
+# go.opentelemetry.io/otel/trace v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@@ -1817,7 +1843,7 @@ go.uber.org/zap/zapgrpc
# go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
## explicit; go 1.18
go4.org/netipx
-# golang.org/x/crypto v0.27.0
+# golang.org/x/crypto v0.28.0
## explicit; go 1.20
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
@@ -1844,7 +1870,7 @@ golang.org/x/exp/slices
# golang.org/x/mod v0.19.0
## explicit; go 1.18
golang.org/x/mod/semver
-# golang.org/x/net v0.29.0
+# golang.org/x/net v0.30.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/context
@@ -1866,7 +1892,7 @@ golang.org/x/net/netutil
golang.org/x/net/proxy
golang.org/x/net/publicsuffix
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.22.0
+# golang.org/x/oauth2 v0.23.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@@ -1884,7 +1910,7 @@ golang.org/x/oauth2/jwt
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.25.0
+# golang.org/x/sys v0.26.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/plan9
@@ -1892,10 +1918,10 @@ golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc/eventlog
-# golang.org/x/term v0.24.0
+# golang.org/x/term v0.25.0
## explicit; go 1.18
golang.org/x/term
-# golang.org/x/text v0.18.0
+# golang.org/x/text v0.19.0
## explicit; go 1.18
golang.org/x/text/cases
golang.org/x/text/encoding
@@ -1919,7 +1945,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.6.0
+# golang.org/x/time v0.7.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.23.0
@@ -1940,7 +1966,7 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/api v0.193.0
+# google.golang.org/api v0.201.0
## explicit; go 1.21
google.golang.org/api/cloudresourcemanager/v1
google.golang.org/api/compute/v1
@@ -1961,13 +1987,13 @@ google.golang.org/api/transport
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/genproto v0.0.0-20240820151423-278611b39280
+# google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9
## explicit; go 1.21
google.golang.org/genproto/googleapis/type/calendarperiod
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/protobuf/api
-# google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280
+# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f
## explicit; go 1.21
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -1976,12 +2002,12 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/label
google.golang.org/genproto/googleapis/api/metric
google.golang.org/genproto/googleapis/api/monitoredres
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9
## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
+# google.golang.org/grpc v1.67.1
## explicit; go 1.21
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -2023,8 +2049,9 @@ google.golang.org/grpc/credentials/tls/certprovider/pemfile
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
-google.golang.org/grpc/experimental
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/health
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
@@ -2068,6 +2095,7 @@ google.golang.org/grpc/internal/xds/bootstrap/tlscreds
google.golang.org/grpc/internal/xds/matcher
google.golang.org/grpc/internal/xds/rbac
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
google.golang.org/grpc/orca
google.golang.org/grpc/orca/internal
@@ -2105,14 +2133,20 @@ google.golang.org/grpc/xds/internal/resolver
google.golang.org/grpc/xds/internal/resolver/internal
google.golang.org/grpc/xds/internal/server
google.golang.org/grpc/xds/internal/xdsclient
+google.golang.org/grpc/xds/internal/xdsclient/internal
google.golang.org/grpc/xds/internal/xdsclient/load
google.golang.org/grpc/xds/internal/xdsclient/transport
+google.golang.org/grpc/xds/internal/xdsclient/transport/internal
google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry
google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter
google.golang.org/grpc/xds/internal/xdsclient/xdsresource
google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a
+## explicit; go 1.21
+google.golang.org/grpc/stats/opentelemetry
+google.golang.org/grpc/stats/opentelemetry/internal
+# google.golang.org/protobuf v1.35.1
+## explicit; go 1.21
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext